1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 * Permission is hereby granted, free of charge, to any person obtaining a copy
3 * of this software and associated documentation files (the "Software"), to
4 * deal in the Software without restriction, including without limitation the
5 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
6 * sell copies of the Software, and to permit persons to whom the Software is
7 * furnished to do so, subject to the following conditions:
8 *
9 * The above copyright notice and this permission notice shall be included in
10 * all copies or substantial portions of the Software.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
15 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
16 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
18 * IN THE SOFTWARE.
19 */
20
21 #include "uv.h"
22 #include "uv_log.h"
23 #include "internal.h"
24 #include "strtok.h"
25
26 #include <stddef.h> /* NULL */
27 #include <stdio.h> /* printf */
28 #include <stdlib.h>
29 #include <string.h> /* strerror */
30 #include <errno.h>
31 #include <assert.h>
32 #include <unistd.h>
33 #include <sys/types.h>
34 #include <sys/stat.h>
35 #include <fcntl.h> /* O_CLOEXEC */
36 #include <sys/ioctl.h>
37 #include <sys/socket.h>
38 #include <sys/un.h>
39 #include <netinet/in.h>
40 #include <arpa/inet.h>
41 #include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
42 #include <sys/uio.h> /* writev */
43 #include <sys/resource.h> /* getrusage */
44 #include <pwd.h>
45 #include <grp.h>
46 #include <sys/utsname.h>
47 #include <sys/time.h>
48 #include <time.h> /* clock_gettime */
49
50 #ifdef __sun
51 # include <sys/filio.h>
52 # include <sys/wait.h>
53 #endif
54
55 #if defined(__APPLE__)
56 # include <sys/filio.h>
57 # endif /* defined(__APPLE__) */
58
59
60 #if defined(__APPLE__) && !TARGET_OS_IPHONE
61 # include <crt_externs.h>
62 # include <mach-o/dyld.h> /* _NSGetExecutablePath */
63 # define environ (*_NSGetEnviron())
64 #else /* defined(__APPLE__) && !TARGET_OS_IPHONE */
65 extern char** environ;
66 #endif /* !(defined(__APPLE__) && !TARGET_OS_IPHONE) */
67
68
69 #if defined(__DragonFly__) || \
70 defined(__FreeBSD__) || \
71 defined(__NetBSD__) || \
72 defined(__OpenBSD__)
73 # include <sys/sysctl.h>
74 # include <sys/filio.h>
75 # include <sys/wait.h>
76 # include <sys/param.h>
77 # if defined(__FreeBSD__)
78 # include <sys/cpuset.h>
79 # define uv__accept4 accept4
80 # endif
81 # if defined(__NetBSD__)
82 # define uv__accept4(a, b, c, d) paccept((a), (b), (c), NULL, (d))
83 # endif
84 #endif
85
86 #if defined(__MVS__)
87 # include <sys/ioctl.h>
88 # include "zos-sys-info.h"
89 #endif
90
91 #if defined(__linux__)
92 # include <sched.h>
93 # include <sys/syscall.h>
94 # define gettid() syscall(SYS_gettid)
95 # define uv__accept4 accept4
96 #endif
97
98 #if defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
99 # include <sanitizer/linux_syscall_hooks.h>
100 #endif
101
102 static void uv__run_pending(uv_loop_t* loop);
103
104 /* Verify that uv_buf_t is ABI-compatible with struct iovec. */
105 STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
106 STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->base) ==
107 sizeof(((struct iovec*) 0)->iov_base));
108 STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->len) ==
109 sizeof(((struct iovec*) 0)->iov_len));
110 STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
111 STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
112
113
114 /* https://github.com/libuv/libuv/issues/1674 */
int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts) {
  /* Read the requested clock into the 64-bit uv_timespec64_t result.
   * Returns UV_EFAULT for a NULL out-pointer, UV_EINVAL for an unknown
   * clock id and a translated errno when clock_gettime() itself fails.
   */
  clockid_t which;
  struct timespec t;

  if (ts == NULL)
    return UV_EFAULT;

  /* Map the libuv clock id onto the system clock id. */
  if (clock_id == UV_CLOCK_MONOTONIC)
    which = CLOCK_MONOTONIC;
  else if (clock_id == UV_CLOCK_REALTIME)
    which = CLOCK_REALTIME;
  else
    return UV_EINVAL;

  if (clock_gettime(which, &t) != 0)
    return UV__ERR(errno);

  ts->tv_sec = t.tv_sec;
  ts->tv_nsec = t.tv_nsec;

  return 0;
}
141
142
/* Return the current high-resolution timestamp in nanoseconds from the
 * platform's precise monotonic clock (see uv__hrtime()).
 */
uint64_t uv_hrtime(void) {
  return uv__hrtime(UV_CLOCK_PRECISE);
}
146
147
/* Begin closing `handle`: mark it closing, remember `close_cb`, and run the
 * type-specific teardown. The close callback itself does NOT run here — for
 * most handle types uv__make_close_pending() queues the handle and the
 * callback fires on a later loop iteration (uv__run_closing_handles()).
 */
void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  /* Calling uv_close() twice on the same handle is a caller bug. */
  assert(!uv__is_closing(handle));

  handle->flags |= UV_HANDLE_CLOSING;
  handle->close_cb = close_cb;

  switch (handle->type) {
  case UV_NAMED_PIPE:
    uv__pipe_close((uv_pipe_t*)handle);
    break;

  case UV_TTY:
    uv__stream_close((uv_stream_t*)handle);
    break;

  case UV_TCP:
    uv__tcp_close((uv_tcp_t*)handle);
    break;

  case UV_UDP:
    uv__udp_close((uv_udp_t*)handle);
    break;

  case UV_PREPARE:
    uv__prepare_close((uv_prepare_t*)handle);
    break;

  case UV_CHECK:
    uv__check_close((uv_check_t*)handle);
    break;

  case UV_IDLE:
    uv__idle_close((uv_idle_t*)handle);
    break;

  case UV_ASYNC:
    uv__async_close((uv_async_t*)handle);
    break;

  case UV_TIMER:
    uv__timer_close((uv_timer_t*)handle);
    break;

  case UV_PROCESS:
    uv__process_close((uv_process_t*)handle);
    break;

  case UV_FS_EVENT:
    uv__fs_event_close((uv_fs_event_t*)handle);
#if defined(__sun) || defined(__MVS__)
    /*
     * On Solaris, illumos, and z/OS we will not be able to dissociate the
     * watcher for an event which is pending delivery, so we cannot always call
     * uv__make_close_pending() straight away. The backend will call the
     * function once the event has cleared.
     */
    return;
#endif
    break;

  case UV_POLL:
    uv__poll_close((uv_poll_t*)handle);
    break;

  case UV_FS_POLL:
    uv__fs_poll_close((uv_fs_poll_t*)handle);
    /* Poll handles use file system requests, and one of them may still be
     * running. The poll code will call uv__make_close_pending() for us. */
    return;

  case UV_SIGNAL:
    uv__signal_close((uv_signal_t*) handle);
    break;

  default:
    assert(0);  /* Unknown handle type: memory corruption or API misuse. */
  }

  uv__make_close_pending(handle);
}
228
/* Get or set a SOL_SOCKET-level option on the handle's descriptor.
 * The in/out parameter `value` doubles as the mode switch: a zero value
 * means "query into *value", a non-zero value means "set to *value".
 * Supports TCP, pipe and UDP handles; others yield UV_ENOTSUP.
 */
int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
  socklen_t optlen;
  int fd;
  int rc;

  if (handle == NULL || value == NULL)
    return UV_EINVAL;

  switch (handle->type) {
  case UV_TCP:
  case UV_NAMED_PIPE:
    fd = uv__stream_fd((uv_stream_t*) handle);
    break;
  case UV_UDP:
    fd = ((uv_udp_t*) handle)->io_watcher.fd;
    break;
  default:
    return UV_ENOTSUP;
  }

  optlen = sizeof(*value);

  if (*value == 0)
    rc = getsockopt(fd, SOL_SOCKET, optname, value, &optlen);
  else
    rc = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, optlen);

  if (rc < 0)
    return UV__ERR(errno);

  return 0;
}
256
/* Prepend a closing handle to the loop's singly-linked closing_handles list;
 * uv__run_closing_handles() drains that list at the end of each loop tick.
 */
void uv__make_close_pending(uv_handle_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->next_closing = handle->loop->closing_handles;
  handle->loop->closing_handles = handle;
}
263
/* Return the maximum iovec count accepted by readv()/writev() on this
 * platform. When only _SC_IOV_MAX is available, the sysconf() result is
 * cached in a relaxed atomic since it cannot change during the process
 * lifetime.
 */
int uv__getiovmax(void) {
#if defined(IOV_MAX)
  return IOV_MAX;
#elif defined(_SC_IOV_MAX)
  static _Atomic int iovmax_cached = -1;
  int iovmax;

  iovmax = atomic_load_explicit(&iovmax_cached, memory_order_relaxed);
  if (iovmax != -1)
    return iovmax;

  /* On some embedded devices (arm-linux-uclibc based ip camera),
   * sysconf(_SC_IOV_MAX) can not get the correct value. The return
   * value is -1 and the errno is EINPROGRESS. Degrade the value to 1.
   */
  iovmax = sysconf(_SC_IOV_MAX);
  if (iovmax == -1)
    iovmax = 1;

  atomic_store_explicit(&iovmax_cached, iovmax, memory_order_relaxed);

  return iovmax;
#else
  /* No way to query the limit; use a historically safe default. */
  return 1024;
#endif
}
290
291
/* Final stage of closing a handle: mark it UV_HANDLE_CLOSED, release
 * type-specific resources, unlink it from the loop and invoke the user's
 * close callback. Runs from uv__run_closing_handles().
 */
static void uv__finish_close(uv_handle_t* handle) {
  uv_signal_t* sh;

  /* Note: while the handle is in the UV_HANDLE_CLOSING state now, it's still
   * possible for it to be active in the sense that uv__is_active() returns
   * true.
   *
   * A good example is when the user calls uv_shutdown(), immediately followed
   * by uv_close(). The handle is considered active at this point because the
   * completion of the shutdown req is still pending.
   */
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->flags |= UV_HANDLE_CLOSED;

  switch (handle->type) {
    case UV_PREPARE:
    case UV_CHECK:
    case UV_IDLE:
    case UV_ASYNC:
    case UV_TIMER:
    case UV_PROCESS:
    case UV_FS_EVENT:
    case UV_FS_POLL:
    case UV_POLL:
      /* Nothing further to tear down for these types. */
      break;

    case UV_SIGNAL:
      /* If there are any caught signals "trapped" in the signal pipe,
       * we can't call the close callback yet. Reinserting the handle
       * into the closing queue makes the event loop spin but that's
       * okay because we only need to deliver the pending events.
       */
      sh = (uv_signal_t*) handle;
      if (sh->caught_signals > sh->dispatched_signals) {
        handle->flags ^= UV_HANDLE_CLOSED;  /* Undo the mark set above. */
        uv__make_close_pending(handle);  /* Back into the queue. */
        return;
      }
      break;

    case UV_NAMED_PIPE:
    case UV_TCP:
    case UV_TTY:
      uv__stream_destroy((uv_stream_t*)handle);
      break;

    case UV_UDP:
      uv__udp_finish_close((uv_udp_t*)handle);
      break;

    default:
      assert(0);
      break;
  }

  uv__handle_unref(handle);
  uv__queue_remove(&handle->handle_queue);

  if (handle->close_cb) {
    handle->close_cb(handle);
  }
}
355
356
/* Finish every handle on the loop's closing list. The list head is detached
 * first so close callbacks that close further handles queue them for the
 * next pass instead of being walked here.
 */
static void uv__run_closing_handles(uv_loop_t* loop) {
  uv_handle_t* handle;
  uv_handle_t* next;

  handle = loop->closing_handles;
  loop->closing_handles = NULL;

  while (handle != NULL) {
    next = handle->next_closing;  /* uv__finish_close() may free `handle`. */
    uv__finish_close(handle);
    handle = next;
  }
}
370
371
/* Public wrapper: non-zero once uv_close() has been called on `handle`. */
int uv_is_closing(const uv_handle_t* handle) {
  return uv__is_closing(handle);
}
375
376
/* Return the loop's backing poll descriptor (epoll/kqueue/event port fd). */
int uv_backend_fd(const uv_loop_t* loop) {
  return loop->backend_fd;
}
380
381
uv__loop_alive(const uv_loop_t* loop)382 static int uv__loop_alive(const uv_loop_t* loop) {
383 return uv__has_active_handles(loop) ||
384 uv__has_active_reqs(loop) ||
385 !uv__queue_empty(&loop->pending_queue) ||
386 loop->closing_handles != NULL;
387 }
388
389
/* Compute how long uv__io_poll() may block: 0 whenever any work must run
 * immediately (stop requested, pending/idle callbacks, children to reap,
 * closing handles), otherwise the delay until the next timer fires
 * (uv__next_timeout() yields -1 for "block indefinitely").
 */
static int uv__backend_timeout(const uv_loop_t* loop) {
  if (loop->stop_flag == 0 &&
      /* uv__loop_alive(loop) && */
      (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
      uv__queue_empty(&loop->pending_queue) &&
      uv__queue_empty(&loop->idle_handles) &&
      (loop->flags & UV_LOOP_REAP_CHILDREN) == 0 &&
      loop->closing_handles == NULL)
    return uv__next_timeout(loop);
  return 0;
}
401
402
/* Public variant of uv__backend_timeout(): additionally forces a zero
 * timeout while watcher registrations are still queued, since uv_run()
 * must cycle once to push them to the kernel before blocking is safe.
 */
int uv_backend_timeout(const uv_loop_t* loop) {
  if (uv__queue_empty(&loop->watcher_queue))
    return uv__backend_timeout(loop);
  /* Need to call uv_run to update the backend fd state. */
  return 0;
}
409
410
/* Public wrapper around uv__loop_alive(). */
int uv_loop_alive(const uv_loop_t* loop) {
  return uv__loop_alive(loop);
}
414
415
uv_loop_alive_taskpool(const uv_loop_t* loop, int initial_handles)416 int uv_loop_alive_taskpool(const uv_loop_t* loop, int initial_handles) {
417 return loop->active_handles > initial_handles ||
418 uv__has_active_reqs(loop) ||
419 !uv__queue_empty(&loop->pending_queue) ||
420 loop->closing_handles != NULL;
421 }
422
423
424 int is_uv_loop_good_magic(const uv_loop_t* loop);
425
426
/* Drive the event loop in the given mode. Returns non-zero if the loop is
 * still alive (more callbacks expected), zero otherwise. A stop flag set by
 * uv_stop() is consumed and cleared before returning.
 */
int uv_run(uv_loop_t* loop, uv_run_mode mode) {
  int timeout;
  int r;
  int can_sleep;

  /* Defensive check against running a corrupted or destroyed loop;
   * presumably an OHOS addition — is_uv_loop_good_magic() is declared above.
   */
  if (!is_uv_loop_good_magic(loop)) {
    return 0;
  }

  r = uv__loop_alive(loop);
  if (!r)
    uv__update_time(loop);

  while (r != 0 && loop->stop_flag == 0) {
    if (!is_uv_loop_good_magic(loop)) {
      return 0;
    }

    uv__update_time(loop);
    uv__run_timers(loop);

    /* Blocking in the poll phase is only allowed when there is no pending
     * or idle work; the snapshot is taken BEFORE running the phases below,
     * which may themselves queue new work.
     */
    can_sleep =
        uv__queue_empty(&loop->pending_queue) &&
        uv__queue_empty(&loop->idle_handles);

    uv__run_pending(loop);
    uv__run_idle(loop);
    uv__run_prepare(loop);

    timeout = 0;
    if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
      timeout = uv__backend_timeout(loop);

    uv__metrics_inc_loop_count(loop);

    uv__io_poll(loop, timeout);

    /* Process immediate callbacks (e.g. write_cb) a small fixed number of
     * times to avoid loop starvation.*/
    for (r = 0; r < 8 && !uv__queue_empty(&loop->pending_queue); r++)
      uv__run_pending(loop);

    /* Run one final update on the provider_idle_time in case uv__io_poll
     * returned because the timeout expired, but no events were received. This
     * call will be ignored if the provider_entry_time was either never set (if
     * the timeout == 0) or was already updated b/c an event was received.
     */
    uv__metrics_update_idle_time(loop);

    uv__run_check(loop);
    uv__run_closing_handles(loop);

    if (mode == UV_RUN_ONCE) {
      /* UV_RUN_ONCE implies forward progress: at least one callback must have
       * been invoked when it returns. uv__io_poll() can return without doing
       * I/O (meaning: no callbacks) when its timeout expires - which means we
       * have pending timers that satisfy the forward progress constraint.
       *
       * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
       * the check.
       */
      uv__update_time(loop);
      uv__run_timers(loop);
    }

    r = uv__loop_alive(loop);
    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
      break;
  }

  /* The if statement lets gcc compile it to a conditional store. Avoids
   * dirtying a cache line.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}
505
506
/* Refresh the loop's cached time; see uv__update_time(). */
void uv_update_time(uv_loop_t* loop) {
  uv__update_time(loop);
}
510
511
/* Public wrapper: non-zero if `handle` is active; see uv__is_active(). */
int uv_is_active(const uv_handle_t* handle) {
  return uv__is_active(handle);
}
515
516
517 /* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
int uv__socket(int domain, int type, int protocol) {
  int sockfd;
  int err;

#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  /* Fast path: request both flags atomically at creation time. */
  sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
  if (sockfd != -1)
    return sockfd;

  /* EINVAL here means the running kernel predates SOCK_NONBLOCK /
   * SOCK_CLOEXEC support; fall through to the two-syscall fallback.
   */
  if (errno != EINVAL)
    return UV__ERR(errno);
#endif

  sockfd = socket(domain, type, protocol);
  if (sockfd == -1)
    return UV__ERR(errno);

  err = uv__nonblock(sockfd, 1);
  if (err == 0)
    err = uv__cloexec(sockfd, 1);

  if (err) {
    uv__close(sockfd);
    return err;
  }

#if defined(SO_NOSIGPIPE)
  /* BSD/macOS: suppress SIGPIPE on writes to a closed peer. Best-effort,
   * so the setsockopt() result is deliberately ignored.
   */
  {
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
  }
#endif

  return sockfd;
}
553
554 /* get a file pointer to a file in read-only and close-on-exec mode */
/* Open `path` read-only with FD_CLOEXEC set and wrap it in a stdio stream.
 * Returns NULL on failure; no descriptor is leaked in that case.
 */
FILE* uv__open_file(const char* path) {
  FILE* stream;
  int fd;

  fd = uv__open_cloexec(path, O_RDONLY);
  if (fd < 0)
    return NULL;

  stream = fdopen(fd, "r");
  if (stream == NULL)
    uv__close(fd);  /* fdopen() failed: release the raw descriptor. */

  return stream;
}
569
570
/* Accept one connection on `sockfd`, retrying on EINTR, and return the peer
 * descriptor with O_NONBLOCK and FD_CLOEXEC set. Returns a negative libuv
 * error code on failure.
 */
int uv__accept(int sockfd) {
  int peerfd;
  int err;

  (void) &err;  /* Silences "unused" when uv__accept4 is defined. */
  assert(sockfd >= 0);

  do
#ifdef uv__accept4
    /* accept4()/paccept(): sets both flags atomically in one syscall. */
    peerfd = uv__accept4(sockfd, NULL, NULL, SOCK_NONBLOCK|SOCK_CLOEXEC);
#else
    peerfd = accept(sockfd, NULL, NULL);
#endif
  while (peerfd == -1 && errno == EINTR);

  if (peerfd == -1)
    return UV__ERR(errno);

#ifndef uv__accept4
  /* No atomic accept variant: set the flags with two follow-up syscalls. */
  err = uv__cloexec(peerfd, 1);
  if (err == 0)
    err = uv__nonblock(peerfd, 1);

  if (err != 0) {
    uv__close(peerfd);
    return err;
  }
#endif

  return peerfd;
}
602
603
604 /* close() on macos has the "interesting" quirk that it fails with EINTR
605 * without closing the file descriptor when a thread is in the cancel state.
606 * That's why libuv calls close$NOCANCEL() instead.
607 *
608 * glibc on linux has a similar issue: close() is a cancellation point and
609 * will unwind the thread when it's in the cancel state. Work around that
610 * by making the system call directly. Musl libc is unaffected.
611 */
int uv__close_nocancel(int fd) {
#if defined(__APPLE__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdollar-in-identifier-extension"
#if defined(__LP64__) || TARGET_OS_IPHONE
  /* Non-cancellable close variant exported by libsystem. */
  extern int close$NOCANCEL(int);
  return close$NOCANCEL(fd);
#else
  extern int close$NOCANCEL$UNIX2003(int);
  return close$NOCANCEL$UNIX2003(fd);
#endif
#pragma GCC diagnostic pop
#elif defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
  /* TSan build: notify the sanitizer around the raw syscall so it can keep
   * tracking the descriptor's lifetime.
   */
  long rc;
  __sanitizer_syscall_pre_close(fd);
  rc = syscall(SYS_close, fd);
  __sanitizer_syscall_post_close(rc, fd);
  return rc;
#elif defined(__linux__) && !defined(__SANITIZE_THREAD__)
  /* Bypass glibc's close() wrapper, which is a thread cancellation point. */
  return syscall(SYS_close, fd);
#else
  return close(fd);
#endif
}
636
637
/* Close `fd` without asserting it isn't a stdio descriptor. Preserves the
 * caller's errno, and maps EINTR/EINPROGRESS to success because the kernel
 * releases the descriptor regardless (see uv__close_nocancel()).
 */
int uv__close_nocheckstdio(int fd) {
  int saved_errno;
  int rc;

  assert(fd > -1);  /* Catch uninitialized io_watcher.fd bugs. */

  saved_errno = errno;
  rc = uv__close_nocancel(fd);
  if (rc == -1) {
    rc = UV__ERR(errno);
    if (rc == UV_EINTR || rc == UV__ERR(EINPROGRESS))
      rc = 0;  /* The close is in progress, not an error. */
    errno = saved_errno;
  }

  return rc;
}
655
656
/* Close `fd`, asserting it is not stdin/stdout/stderr — closing those is
 * almost always a caller bug.
 */
int uv__close(int fd) {
  assert(fd > STDERR_FILENO);  /* Catch stdio close bugs. */
#if defined(__MVS__)
  /* z/OS: drop the epoll bookkeeping for this fd before closing it. */
  SAVE_ERRNO(epoll_file_close(fd));
#endif
  return uv__close_nocheckstdio(fd);
}
664
#if UV__NONBLOCK_IS_IOCTL
/* Toggle O_NONBLOCK with a single FIONBIO ioctl instead of the two-syscall
 * F_GETFL/F_SETFL dance, on platforms where that is known to be safe.
 */
int uv__nonblock_ioctl(int fd, int set) {
  int r;

  do
    r = ioctl(fd, FIONBIO, &set);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}
#endif
679
680
/* Set (set != 0) or clear (set == 0) O_NONBLOCK on `fd` via fcntl(),
 * retrying both syscalls on EINTR. Returns 0 or a libuv error code.
 */
int uv__nonblock_fcntl(int fd, int set) {
  int current;
  int desired;
  int rc;

  do
    current = fcntl(fd, F_GETFL);
  while (current == -1 && errno == EINTR);

  if (current == -1)
    return UV__ERR(errno);

  /* Nothing to do when the flag already matches the request. */
  if (!!(current & O_NONBLOCK) == !!set)
    return 0;

  desired = set ? (current | O_NONBLOCK) : (current & ~O_NONBLOCK);

  do
    rc = fcntl(fd, F_SETFL, desired);
  while (rc == -1 && errno == EINTR);

  if (rc)
    return UV__ERR(errno);

  return 0;
}
710
711
/* Set (set != 0) or clear (set == 0) FD_CLOEXEC on `fd`, retrying on EINTR.
 * F_SETFD replaces the full descriptor-flag set, and FD_CLOEXEC is the only
 * descriptor flag, so no read-modify-write is needed.
 */
int uv__cloexec(int fd, int set) {
  int fdflags;
  int rc;

  fdflags = set ? FD_CLOEXEC : 0;

  do
    rc = fcntl(fd, F_SETFD, fdflags);
  while (rc == -1 && errno == EINTR);

  if (rc)
    return UV__ERR(errno);

  return 0;
}
729
730
/* recvmsg() wrapper that guarantees FD_CLOEXEC on any descriptors received
 * as SCM_RIGHTS ancillary data. Uses MSG_CMSG_CLOEXEC where the kernel
 * supports it; otherwise sets the flag after the fact, which leaves a small
 * fork/exec race window on those platforms.
 */
ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
#if defined(__ANDROID__) || \
    defined(__DragonFly__) || \
    defined(__FreeBSD__) || \
    defined(__NetBSD__) || \
    defined(__OpenBSD__) || \
    defined(__linux__)
  ssize_t rc;
  rc = recvmsg(fd, msg, flags | MSG_CMSG_CLOEXEC);
  if (rc == -1)
    return UV__ERR(errno);
  return rc;
#else
  struct cmsghdr* cmsg;
  int* pfd;
  int* end;
  ssize_t rc;
  rc = recvmsg(fd, msg, flags);
  if (rc == -1)
    return UV__ERR(errno);
  if (msg->msg_controllen == 0)
    return rc;
  /* Walk every control message and mark each SCM_RIGHTS fd close-on-exec. */
  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
    if (cmsg->cmsg_type == SCM_RIGHTS)
      for (pfd = (int*) CMSG_DATA(cmsg),
           end = (int*) ((char*) cmsg + cmsg->cmsg_len);
           pfd < end;
           pfd += 1)
        uv__cloexec(*pfd, 1);
  return rc;
#endif
}
763
764
/* Copy the current working directory into `buffer` (capacity in `*size`).
 * On success `*size` becomes the string length excluding the NUL. If the
 * buffer is too small, returns UV_ENOBUFS with `*size` set to the required
 * capacity including the NUL terminator.
 */
int uv_cwd(char* buffer, size_t* size) {
  char scratch[1 + UV__PATH_MAX];

  if (buffer == NULL || size == NULL)
    return UV_EINVAL;

  /* Try to read directly into the user's buffer first... */
  if (getcwd(buffer, *size) != NULL)
    goto fixup;

  if (errno != ERANGE)
    return UV__ERR(errno);

  /* ...or into scratch space if the user's buffer is too small
   * so we can report how much space to provide on the next try.
   */
  if (getcwd(scratch, sizeof(scratch)) == NULL)
    return UV__ERR(errno);

  buffer = scratch;

fixup:

  *size = strlen(buffer);

  /* Strip a trailing slash, but never reduce "/" itself to empty. */
  if (*size > 1 && buffer[*size - 1] == '/') {
    *size -= 1;
    buffer[*size] = '\0';
  }

  /* The path only fit in the scratch buffer: report the needed size. */
  if (buffer == scratch) {
    *size += 1;
    return UV_ENOBUFS;
  }

  return 0;
}
802
803
/* Change the process working directory; 0 on success, libuv error code
 * otherwise.
 */
int uv_chdir(const char* dir) {
  return chdir(dir) ? UV__ERR(errno) : 0;
}
810
811
/* Best-effort: mark every open descriptor close-on-exec so that processes
 * spawned outside libuv's own machinery don't inherit them.
 */
void uv_disable_stdio_inheritance(void) {
  int fd;

  /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
   * first 16 file descriptors. After that, bail out after the first error.
   */
  for (fd = 0; ; fd++)
    if (uv__cloexec(fd, 1) && fd > 15)
      break;
}
822
823
/* Expose the OS descriptor backing `handle` through `*fd`. Only handle
 * types that own a descriptor are supported (UV_EINVAL otherwise), and a
 * closing handle or unset descriptor yields UV_EBADF.
 */
int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
  int raw;

  switch (handle->type) {
  case UV_TCP:
  case UV_NAMED_PIPE:
  case UV_TTY:
    raw = uv__stream_fd((uv_stream_t*) handle);
    break;

  case UV_UDP:
    raw = ((uv_udp_t*) handle)->io_watcher.fd;
    break;

  case UV_POLL:
    raw = ((uv_poll_t*) handle)->io_watcher.fd;
    break;

  default:
    return UV_EINVAL;
  }

  if (raw == -1 || uv__is_closing(handle))
    return UV_EBADF;

  *fd = raw;
  return 0;
}
852
853
/* Invoke the callback of every I/O watcher on the loop's pending queue.
 * The queue is detached into a local first, so callbacks that re-queue
 * watchers defer them to the next pass instead of looping forever here.
 */
static void uv__run_pending(uv_loop_t* loop) {
  struct uv__queue* q;
  struct uv__queue pq;
  uv__io_t* w;

  uv__queue_move(&loop->pending_queue, &pq);

  while (!uv__queue_empty(&pq)) {
    q = uv__queue_head(&pq);
    uv__queue_remove(q);
    uv__queue_init(q);
    w = uv__queue_data(q, uv__io_t, pending_queue);
    /* Pending callbacks are always delivered as POLLOUT. */
    w->cb(loop, w, POLLOUT);
  }
}
869
870
/* Round `val` up to the nearest power of two (returns `val` unchanged when
 * it already is one). Classic bit-smearing: after the shifts, every bit
 * below the highest set bit is set, so adding one yields the next power.
 */
static unsigned int next_power_of_two(unsigned int val) {
  unsigned int v;

  v = val - 1;
  v |= v >> 1;
  v |= v >> 2;
  v |= v >> 4;
  v |= v >> 8;
  v |= v >> 16;

  return v + 1;
}
881
/* Grow loop->watchers so it can be indexed up to fd `len - 1`. The array
 * keeps two extra slots past nwatchers that the poll backend uses to stash
 * its current event list and count; those must survive reallocation.
 */
static void maybe_resize(uv_loop_t* loop, unsigned int len) {
  uv__io_t** watchers;
  void* fake_watcher_list;
  void* fake_watcher_count;
  unsigned int nwatchers;
  unsigned int i;

  if (len <= loop->nwatchers)
    return;

  /* Preserve fake watcher list and count at the end of the watchers */
  if (loop->watchers != NULL) {
    fake_watcher_list = loop->watchers[loop->nwatchers];
    fake_watcher_count = loop->watchers[loop->nwatchers + 1];
  } else {
    fake_watcher_list = NULL;
    fake_watcher_count = NULL;
  }

  /* Size the visible portion to a power of two minus the two extra slots. */
  nwatchers = next_power_of_two(len + 2) - 2;
  watchers = uv__reallocf(loop->watchers,
                          (nwatchers + 2) * sizeof(loop->watchers[0]));

  if (watchers == NULL)
    abort();  /* OOM while resizing the watcher table is unrecoverable. */
  for (i = loop->nwatchers; i < nwatchers; i++)
    watchers[i] = NULL;
  watchers[nwatchers] = fake_watcher_list;
  watchers[nwatchers + 1] = fake_watcher_count;

  loop->watchers = watchers;
  loop->nwatchers = nwatchers;
}
915
916
/* Initialize I/O watcher `w` with callback `cb` for descriptor `fd`
 * (`fd` may be -1 when not yet known). Does not register it with any loop.
 */
void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
  assert(cb != NULL);
  assert(fd >= -1);
  uv__queue_init(&w->pending_queue);
  uv__queue_init(&w->watcher_queue);
  w->cb = cb;
  w->fd = fd;
  w->events = 0;   /* Event mask currently registered with the kernel. */
  w->pevents = 0;  /* Event mask the user wants watched. */
}
927
928
/* Start watching `events` (POLLIN/POLLOUT/...) on watcher `w`, queuing it
 * so the next uv__io_poll() pushes the updated interest set to the kernel.
 */
void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  assert(w->fd >= 0);
  assert(w->fd < INT_MAX);

  w->pevents |= events;
  maybe_resize(loop, w->fd + 1);

#if !defined(__sun)
  /* The event ports backend needs to rearm all file descriptors on each and
   * every tick of the event loop but the other backends allow us to
   * short-circuit here if the event mask is unchanged.
   */
  if (w->events == w->pevents)
    return;
#endif

  if (uv__queue_empty(&w->watcher_queue))
    uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);

  /* First registration for this fd: record the watcher and bump the count. */
  if (loop->watchers[w->fd] == NULL) {
    loop->watchers[w->fd] = w;
    loop->nfds++;
  }
}
955
956
/* Stop watching `events` on watcher `w`. When no events of interest remain,
 * the watcher is fully deregistered from the loop; otherwise it is
 * (re-)queued so the poll backend can narrow the kernel's interest set.
 */
void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);

  if (w->fd == -1)
    return;

  assert(w->fd >= 0);

  /* Happens when uv__io_stop() is called on a handle that was never started. */
  if ((unsigned) w->fd >= loop->nwatchers)
    return;

  w->pevents &= ~events;

  if (w->pevents == 0) {
    uv__queue_remove(&w->watcher_queue);
    uv__queue_init(&w->watcher_queue);
    w->events = 0;

    if (w == loop->watchers[w->fd]) {
      assert(loop->nfds > 0);
      loop->watchers[w->fd] = NULL;
      loop->nfds--;
    }
  }
  else if (uv__queue_empty(&w->watcher_queue))
    uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
}
986
987
/* Fully tear down watcher `w`: stop all events, drop any queued pending
 * callback and purge stale kernel events for its descriptor.
 */
void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
  uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
  uv__queue_remove(&w->pending_queue);

  /* Remove stale events for this file descriptor */
  if (w->fd != -1)
    uv__platform_invalidate_fd(loop, w->fd);
}
996
997
/* Schedule watcher `w` for a synthetic callback on the next
 * uv__run_pending() pass; a no-op if it is already queued.
 */
void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
  if (uv__queue_empty(&w->pending_queue))
    uv__queue_insert_tail(&loop->pending_queue, &w->pending_queue);
}
1002
1003
/* Return non-zero if watcher `w` is interested in any of `events`. */
int uv__io_active(const uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  return 0 != (w->pevents & events);
}
1009
1010
/* Return non-zero if a watcher is registered for descriptor `fd`.
 * The unsigned cast makes negative fds fail the bounds check.
 */
int uv__fd_exists(uv_loop_t* loop, int fd) {
  if ((unsigned) fd >= loop->nwatchers)
    return 0;
  return loop->watchers[fd] != NULL;
}
1014
1015
/* Fill `rusage` with resource usage for the current process, normalizing
 * ru_maxrss to kilobytes regardless of what unit the platform reports.
 */
int uv_getrusage(uv_rusage_t* rusage) {
  struct rusage usage;

  if (getrusage(RUSAGE_SELF, &usage))
    return UV__ERR(errno);

  rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
  rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;

  rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
  rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;

#if !defined(__MVS__) && !defined(__HAIKU__)
  /* z/OS and Haiku only provide the time fields above. */
  rusage->ru_maxrss = usage.ru_maxrss;
  rusage->ru_ixrss = usage.ru_ixrss;
  rusage->ru_idrss = usage.ru_idrss;
  rusage->ru_isrss = usage.ru_isrss;
  rusage->ru_minflt = usage.ru_minflt;
  rusage->ru_majflt = usage.ru_majflt;
  rusage->ru_nswap = usage.ru_nswap;
  rusage->ru_inblock = usage.ru_inblock;
  rusage->ru_oublock = usage.ru_oublock;
  rusage->ru_msgsnd = usage.ru_msgsnd;
  rusage->ru_msgrcv = usage.ru_msgrcv;
  rusage->ru_nsignals = usage.ru_nsignals;
  rusage->ru_nvcsw = usage.ru_nvcsw;
  rusage->ru_nivcsw = usage.ru_nivcsw;
#endif

  /* Most platforms report ru_maxrss in kilobytes; macOS and Solaris are
   * the outliers because of course they are.
   */
#if defined(__APPLE__)
  rusage->ru_maxrss /= 1024;                  /* macOS and iOS report bytes. */
#elif defined(__sun)
  rusage->ru_maxrss /= getpagesize() / 1024;  /* Solaris reports pages. */
#endif

  return 0;
}
1056
1057
/* open(2) wrapper returning a close-on-exec descriptor — atomically via
 * O_CLOEXEC when the platform has it, otherwise via a follow-up fcntl().
 * Returns a negative libuv error code on failure.
 */
int uv__open_cloexec(const char* path, int flags) {
#if defined(O_CLOEXEC)
  int fd;

  fd = open(path, flags | O_CLOEXEC);
  if (fd == -1)
    return UV__ERR(errno);

  return fd;
#else  /* O_CLOEXEC */
  int err;
  int fd;

  fd = open(path, flags);
  if (fd == -1)
    return UV__ERR(errno);

  err = uv__cloexec(fd, 1);
  if (err) {
    uv__close(fd);
    return err;
  }

  return fd;
#endif  /* O_CLOEXEC */
}
1084
1085
/* Read up to `len - 1` bytes from `filename` into `buf` and NUL-terminate.
 * Intended for small pseudo-files (/proc style); a single read is assumed
 * to capture the whole content. Returns 0 or a negative libuv error code.
 */
int uv__slurp(const char* filename, char* buf, size_t len) {
  ssize_t n;
  int fd;

  assert(len > 0);

  fd = uv__open_cloexec(filename, O_RDONLY);
  if (fd < 0)
    return fd;

  do
    n = read(fd, buf, len - 1);
  while (n == -1 && errno == EINTR);

  if (uv__close_nocheckstdio(fd))
    abort();  /* Closing a just-opened fd cannot legitimately fail. */

  if (n < 0)
    return UV__ERR(errno);

  buf[n] = '\0';

  return 0;
}
1110
1111
/* dup2() variant that sets FD_CLOEXEC on `newfd` — atomically via dup3()
 * where available. Returns `newfd` on success, a libuv error code otherwise.
 */
int uv__dup2_cloexec(int oldfd, int newfd) {
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__linux__)
  int r;

  r = dup3(oldfd, newfd, O_CLOEXEC);
  if (r == -1)
    return UV__ERR(errno);

  return r;
#else
  int err;
  int r;

  r = dup2(oldfd, newfd); /* Never retry. */
  if (r == -1)
    return UV__ERR(errno);

  err = uv__cloexec(newfd, 1);
  if (err != 0) {
    uv__close(newfd);
    return err;
  }

  return r;
#endif
}
1138
1139
uv_os_homedir(char* buffer, size_t* size)1140 int uv_os_homedir(char* buffer, size_t* size) {
1141 uv_passwd_t pwd;
1142 size_t len;
1143 int r;
1144
1145 /* Check if the HOME environment variable is set first. The task of
1146 performing input validation on buffer and size is taken care of by
1147 uv_os_getenv(). */
1148 r = uv_os_getenv("HOME", buffer, size);
1149
1150 if (r != UV_ENOENT)
1151 return r;
1152
1153 /* HOME is not set, so call uv_os_get_passwd() */
1154 r = uv_os_get_passwd(&pwd);
1155
1156 if (r != 0) {
1157 return r;
1158 }
1159
1160 len = strlen(pwd.homedir);
1161
1162 if (len >= *size) {
1163 *size = len + 1;
1164 uv_os_free_passwd(&pwd);
1165 return UV_ENOBUFS;
1166 }
1167
1168 memcpy(buffer, pwd.homedir, len + 1);
1169 *size = len;
1170 uv_os_free_passwd(&pwd);
1171
1172 return 0;
1173 }
1174
1175
/* Copy the temp directory into `buffer` (capacity in `*size`), checking the
 * conventional environment variables before falling back to a platform
 * default. Trailing slashes are stripped. On UV_ENOBUFS, `*size` is set to
 * the required capacity including the NUL terminator.
 */
int uv_os_tmpdir(char* buffer, size_t* size) {
  const char* buf;
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

#define CHECK_ENV_VAR(name)                                                   \
  do {                                                                        \
    buf = getenv(name);                                                       \
    if (buf != NULL)                                                          \
      goto return_buffer;                                                     \
  }                                                                           \
  while (0)

  /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */
  CHECK_ENV_VAR("TMPDIR");
  CHECK_ENV_VAR("TMP");
  CHECK_ENV_VAR("TEMP");
  CHECK_ENV_VAR("TEMPDIR");

#undef CHECK_ENV_VAR

  /* No temp environment variables defined */
#if defined(__ANDROID__)
  buf = "/data/local/tmp";
#else
  buf = "/tmp";
#endif

return_buffer:
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  /* The returned directory should not have a trailing slash. */
  if (len > 1 && buf[len - 1] == '/') {
    len--;
  }

  /* Copy len + 1 bytes, then re-terminate in case a slash was stripped. */
  memcpy(buffer, buf, len + 1);
  buffer[len] = '\0';
  *size = len;

  return 0;
}
1225
1226
/* Look up the passwd entry for |uid| and copy its fields into |pwd|.
 * username, homedir and shell live in one allocation owned by |pwd|
 * (released via uv_os_free_passwd()). Returns 0 on success, UV_ENOENT
 * when the uid has no entry, or a negative libuv error code.
 */
static int uv__getpwuid_r(uv_passwd_t *pwd, uid_t uid) {
  struct passwd pw;
  struct passwd* result;
  char* buf;
  size_t bufsize;
  size_t name_size;
  size_t homedir_size;
  size_t shell_size;
  int r;

  if (pwd == NULL)
    return UV_EINVAL;

  /* Calling sysconf(_SC_GETPW_R_SIZE_MAX) would get the suggested size, but it
   * is frequently 1024 or 4096, so we can just use that directly. The pwent
   * will not usually be large. */
  for (bufsize = 2000;; bufsize *= 2) {
    buf = uv__malloc(bufsize);

    if (buf == NULL)
      return UV_ENOMEM;

    /* Retry if the lookup is interrupted by a signal. */
    do
      r = getpwuid_r(uid, &pw, buf, bufsize, &result);
    while (r == EINTR);

    /* Keep the scratch buffer only on success: pw's string fields point
     * into it and are copied out further down. */
    if (r != 0 || result == NULL)
      uv__free(buf);

    /* ERANGE means the buffer was too small; loop around and double it. */
    if (r != ERANGE)
      break;
  }

  if (r != 0)
    return UV__ERR(r);

  if (result == NULL)
    return UV_ENOENT;

  /* Allocate memory for the username, shell, and home directory */
  name_size = strlen(pw.pw_name) + 1;
  homedir_size = strlen(pw.pw_dir) + 1;
  shell_size = strlen(pw.pw_shell) + 1;
  pwd->username = uv__malloc(name_size + homedir_size + shell_size);

  if (pwd->username == NULL) {
    uv__free(buf);
    return UV_ENOMEM;
  }

  /* Copy the username */
  memcpy(pwd->username, pw.pw_name, name_size);

  /* Copy the home directory */
  pwd->homedir = pwd->username + name_size;
  memcpy(pwd->homedir, pw.pw_dir, homedir_size);

  /* Copy the shell */
  pwd->shell = pwd->homedir + homedir_size;
  memcpy(pwd->shell, pw.pw_shell, shell_size);

  /* Copy the uid and gid */
  pwd->uid = pw.pw_uid;
  pwd->gid = pw.pw_gid;

  uv__free(buf);

  return 0;
}
1296
1297
/* Look up the group entry for |gid| and copy its fields into |grp|.
 * The member-pointer array, member strings and group name share a single
 * allocation owned by |grp|. Returns 0 on success, UV_ENOENT when the gid
 * has no entry, or a negative libuv error code.
 */
int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) {
#if defined(__ANDROID__) && __ANDROID_API__ < 24
  /* This function getgrgid_r() was added in Android N (level 24) */
  return UV_ENOSYS;
#else
  struct group gp;
  struct group* result;
  char* buf;
  char* gr_mem;
  size_t bufsize;
  size_t name_size;
  long members;
  size_t mem_size;
  int r;

  if (grp == NULL)
    return UV_EINVAL;

  /* Calling sysconf(_SC_GETGR_R_SIZE_MAX) would get the suggested size, but it
   * is frequently 1024 or 4096, so we can just use that directly. The pwent
   * will not usually be large. */
  for (bufsize = 2000;; bufsize *= 2) {
    buf = uv__malloc(bufsize);

    if (buf == NULL)
      return UV_ENOMEM;

    /* Retry if the lookup is interrupted by a signal. */
    do
      r = getgrgid_r(gid, &gp, buf, bufsize, &result);
    while (r == EINTR);

    /* Keep the scratch buffer only on success: gp's strings point into it. */
    if (r != 0 || result == NULL)
      uv__free(buf);

    /* ERANGE means the buffer was too small; loop around and double it. */
    if (r != ERANGE)
      break;
  }

  if (r != 0)
    return UV__ERR(r);

  if (result == NULL)
    return UV_ENOENT;

  /* Allocate memory for the groupname and members. */
  /* Layout of the single allocation: the members pointer array (plus one
   * slot for the NULL terminator), then the member strings back to back,
   * then the group name. */
  name_size = strlen(gp.gr_name) + 1;
  members = 0;
  mem_size = sizeof(char*);  /* The terminating NULL pointer slot. */
  for (r = 0; gp.gr_mem[r] != NULL; r++) {
    mem_size += strlen(gp.gr_mem[r]) + 1 + sizeof(char*);
    members++;
  }

  gr_mem = uv__malloc(name_size + mem_size);
  if (gr_mem == NULL) {
    uv__free(buf);
    return UV_ENOMEM;
  }

  /* Copy the members */
  grp->members = (char**) gr_mem;
  grp->members[members] = NULL;
  gr_mem = (char*) &grp->members[members + 1];
  for (r = 0; r < members; r++) {
    grp->members[r] = gr_mem;
    strcpy(gr_mem, gp.gr_mem[r]);
    gr_mem += strlen(gr_mem) + 1;
  }
  /* Sanity-check the pointer arithmetic against the computed sizes. */
  assert(gr_mem == (char*)grp->members + mem_size);

  /* Copy the groupname */
  grp->groupname = gr_mem;
  memcpy(grp->groupname, gp.gr_name, name_size);
  gr_mem += name_size;

  /* Copy the gid */
  grp->gid = gp.gr_gid;

  uv__free(buf);

  return 0;
#endif
}
1381
1382
/* Fetch the passwd entry for the effective uid of the calling process. */
int uv_os_get_passwd(uv_passwd_t* pwd) {
  return uv__getpwuid_r(pwd, geteuid());
}
1386
1387
/* Fetch the passwd entry for an arbitrary |uid|. */
int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid) {
  return uv__getpwuid_r(pwd, uid);
}
1391
1392
/* Map a positive system errno value onto a negative libuv error code.
 * Values <= 0 are assumed to already be libuv error codes (or success)
 * and are passed through unchanged.
 */
int uv_translate_sys_error(int sys_errno) {
  if (sys_errno <= 0)
    return sys_errno;  /* Already a libuv error. */

  return -sys_errno;
}
1397
1398
/* Snapshot the process environment into a heap-allocated array of
 * name/value items. Each item's name is a uv__strdup()ed copy with the '='
 * replaced by NUL; value points into the same allocation. Entries without
 * an '=' are skipped. On failure everything allocated so far is released
 * and UV_ENOMEM is returned.
 */
int uv_os_environ(uv_env_item_t** envitems, int* count) {
  int i, j, cnt;
  uv_env_item_t* envitem;

  *envitems = NULL;
  *count = 0;

  /* Count the environment entries first so one allocation suffices. */
  for (i = 0; environ[i] != NULL; i++);

  *envitems = uv__calloc(i, sizeof(**envitems));

  if (*envitems == NULL)
    return UV_ENOMEM;

  for (j = 0, cnt = 0; j < i; j++) {
    char* buf;
    char* ptr;

    if (environ[j] == NULL)
      break;

    buf = uv__strdup(environ[j]);
    if (buf == NULL)
      goto fail;

    ptr = strchr(buf, '=');
    if (ptr == NULL) {
      /* Malformed entry without '='; skip it. */
      uv__free(buf);
      continue;
    }

    /* Split "NAME=VALUE" in place: name is the buffer start, value the
     * text after the (now NUL-ed) '='. One free of name releases both. */
    *ptr = '\0';

    envitem = &(*envitems)[cnt];
    envitem->name = buf;
    envitem->value = ptr + 1;

    cnt++;
  }

  *count = cnt;
  return 0;

fail:
  /* Free every item populated so far. (Bug fix: index with the loop
   * variable i, not cnt, which freed one out-of-range slot repeatedly
   * and leaked the real entries.) */
  for (i = 0; i < cnt; i++) {
    envitem = &(*envitems)[i];
    uv__free(envitem->name);
  }
  uv__free(*envitems);

  *envitems = NULL;
  *count = 0;
  return UV_ENOMEM;
}
1453
1454
uv_os_getenv(const char* name, char* buffer, size_t* size)1455 int uv_os_getenv(const char* name, char* buffer, size_t* size) {
1456 char* var;
1457 size_t len;
1458
1459 if (name == NULL || buffer == NULL || size == NULL || *size == 0)
1460 return UV_EINVAL;
1461
1462 var = getenv(name);
1463
1464 if (var == NULL)
1465 return UV_ENOENT;
1466
1467 len = strlen(var);
1468
1469 if (len >= *size) {
1470 *size = len + 1;
1471 return UV_ENOBUFS;
1472 }
1473
1474 memcpy(buffer, var, len + 1);
1475 *size = len;
1476
1477 return 0;
1478 }
1479
1480
uv_os_setenv(const char* name, const char* value)1481 int uv_os_setenv(const char* name, const char* value) {
1482 if (name == NULL || value == NULL)
1483 return UV_EINVAL;
1484
1485 if (setenv(name, value, 1) != 0)
1486 return UV__ERR(errno);
1487
1488 return 0;
1489 }
1490
1491
uv_os_unsetenv(const char* name)1492 int uv_os_unsetenv(const char* name) {
1493 if (name == NULL)
1494 return UV_EINVAL;
1495
1496 if (unsetenv(name) != 0)
1497 return UV__ERR(errno);
1498
1499 return 0;
1500 }
1501
1502
uv_os_gethostname(char* buffer, size_t* size)1503 int uv_os_gethostname(char* buffer, size_t* size) {
1504 /*
1505 On some platforms, if the input buffer is not large enough, gethostname()
1506 succeeds, but truncates the result. libuv can detect this and return ENOBUFS
1507 instead by creating a large enough buffer and comparing the hostname length
1508 to the size input.
1509 */
1510 char buf[UV_MAXHOSTNAMESIZE];
1511 size_t len;
1512
1513 if (buffer == NULL || size == NULL || *size == 0)
1514 return UV_EINVAL;
1515
1516 if (gethostname(buf, sizeof(buf)) != 0)
1517 return UV__ERR(errno);
1518
1519 buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. */
1520 len = strlen(buf);
1521
1522 if (len >= *size) {
1523 *size = len + 1;
1524 return UV_ENOBUFS;
1525 }
1526
1527 memcpy(buffer, buf, len + 1);
1528 *size = len;
1529 return 0;
1530 }
1531
1532
/* On Unix a file descriptor is its own OS handle; the conversion exists
 * for API parity with the Windows implementation. */
uv_os_fd_t uv_get_osfhandle(int fd) {
  return fd;
}
1536
/* Inverse of uv_get_osfhandle(); on Unix the handle already is the fd. */
int uv_open_osfhandle(uv_os_fd_t os_fd) {
  return os_fd;
}
1540
/* Return the process id of the calling process. */
uv_pid_t uv_os_getpid(void) {
  return getpid();
}
1544
1545
/* Return the parent process id of the calling process. */
uv_pid_t uv_os_getppid(void) {
  return getppid();
}
1549
/* Return CPU_SETSIZE when CPU affinity is supported on this platform,
 * UV_ENOTSUP otherwise. */
int uv_cpumask_size(void) {
#if UV__CPU_AFFINITY_SUPPORTED
  return CPU_SETSIZE;
#else
  return UV_ENOTSUP;
#endif
}
1557
/* Fetch the scheduling priority (nice value) of process |pid| into
 * |priority|. Returns 0 on success or a negative libuv error code.
 */
int uv_os_getpriority(uv_pid_t pid, int* priority) {
  int prio;

  if (priority == NULL)
    return UV_EINVAL;

  /* getpriority() can legitimately return -1, so clear errno beforehand
   * and check it afterwards to distinguish failure from a -1 priority. */
  errno = 0;
  prio = getpriority(PRIO_PROCESS, (int) pid);
  if (prio == -1 && errno != 0)
    return UV__ERR(errno);

  *priority = prio;
  return 0;
}
1573
1574
/* Set the scheduling priority (nice value) of process |pid|.
 * |priority| must lie in [UV_PRIORITY_HIGHEST, UV_PRIORITY_LOW].
 */
int uv_os_setpriority(uv_pid_t pid, int priority) {
  int r;

  /* Reject values outside the documented libuv priority range. */
  if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW)
    return UV_EINVAL;

  r = setpriority(PRIO_PROCESS, (int) pid, priority);
  if (r != 0)
    return UV__ERR(errno);

  return 0;
}
1584
1585 /**
1586 * If the function succeeds, the return value is 0.
1587 * If the function fails, the return value is non-zero.
1588 * for Linux, when schedule policy is SCHED_OTHER (default), priority is 0.
1589 * So the output parameter priority is actually the nice value.
1590 */
uv_thread_getpriority(uv_thread_t tid, int* priority)1591 int uv_thread_getpriority(uv_thread_t tid, int* priority) {
1592 int r;
1593 int policy;
1594 struct sched_param param;
1595 #ifdef __linux__
1596 pid_t pid = gettid();
1597 #endif
1598
1599 if (priority == NULL)
1600 return UV_EINVAL;
1601
1602 r = pthread_getschedparam(tid, &policy, ¶m);
1603 if (r != 0)
1604 return UV__ERR(errno);
1605
1606 #ifdef __linux__
1607 if (SCHED_OTHER == policy && pthread_equal(tid, pthread_self())) {
1608 errno = 0;
1609 r = getpriority(PRIO_PROCESS, pid);
1610 if (r == -1 && errno != 0)
1611 return UV__ERR(errno);
1612 *priority = r;
1613 return 0;
1614 }
1615 #endif
1616
1617 *priority = param.sched_priority;
1618 return 0;
1619 }
1620
1621 #ifdef __linux__
set_nice_for_calling_thread(int priority)1622 static int set_nice_for_calling_thread(int priority) {
1623 int r;
1624 int nice;
1625
1626 if (priority < UV_THREAD_PRIORITY_LOWEST || priority > UV_THREAD_PRIORITY_HIGHEST)
1627 return UV_EINVAL;
1628
1629 pid_t pid = gettid();
1630 nice = 0 - priority * 2;
1631 r = setpriority(PRIO_PROCESS, pid, nice);
1632 if (r != 0)
1633 return UV__ERR(errno);
1634 return 0;
1635 }
1636 #endif
1637
1638 /**
1639 * If the function succeeds, the return value is 0.
1640 * If the function fails, the return value is non-zero.
1641 */
uv_thread_setpriority(uv_thread_t tid, int priority)1642 int uv_thread_setpriority(uv_thread_t tid, int priority) {
1643 int r;
1644 int min;
1645 int max;
1646 int range;
1647 int prio;
1648 int policy;
1649 struct sched_param param;
1650
1651 if (priority < UV_THREAD_PRIORITY_LOWEST || priority > UV_THREAD_PRIORITY_HIGHEST)
1652 return UV_EINVAL;
1653
1654 r = pthread_getschedparam(tid, &policy, ¶m);
1655 if (r != 0)
1656 return UV__ERR(errno);
1657
1658 #ifdef __linux__
1659 /**
1660 * for Linux, when schedule policy is SCHED_OTHER (default), priority must be 0,
1661 * we should set the nice value in this case.
1662 */
1663 if (SCHED_OTHER == policy && pthread_equal(tid, pthread_self()))
1664 return set_nice_for_calling_thread(priority);
1665 #endif
1666
1667 #ifdef __PASE__
1668 min = 1;
1669 max = 127;
1670 #else
1671 min = sched_get_priority_min(policy);
1672 max = sched_get_priority_max(policy);
1673 #endif
1674
1675 if (min == -1 || max == -1)
1676 return UV__ERR(errno);
1677
1678 range = max - min;
1679
1680 switch (priority) {
1681 case UV_THREAD_PRIORITY_HIGHEST:
1682 prio = max;
1683 break;
1684 case UV_THREAD_PRIORITY_ABOVE_NORMAL:
1685 prio = min + range * 3 / 4;
1686 break;
1687 case UV_THREAD_PRIORITY_NORMAL:
1688 prio = min + range / 2;
1689 break;
1690 case UV_THREAD_PRIORITY_BELOW_NORMAL:
1691 prio = min + range / 4;
1692 break;
1693 case UV_THREAD_PRIORITY_LOWEST:
1694 prio = min;
1695 break;
1696 default:
1697 return 0;
1698 }
1699
1700 if (param.sched_priority != prio) {
1701 param.sched_priority = prio;
1702 r = pthread_setschedparam(tid, policy, ¶m);
1703 if (r != 0)
1704 return UV__ERR(errno);
1705 }
1706
1707 return 0;
1708 }
1709
/* Fill |buffer| with uname(2) information, with truncation checking.
 * On any failure every field is reset to the empty string and a negative
 * libuv error code is returned. */
int uv_os_uname(uv_utsname_t* buffer) {
  struct utsname buf;
  int r;

  if (buffer == NULL)
    return UV_EINVAL;

  if (uname(&buf) == -1) {
    r = UV__ERR(errno);
    goto error;
  }

  r = uv__strscpy(buffer->sysname, buf.sysname, sizeof(buffer->sysname));
  if (r == UV_E2BIG)
    goto error;

#ifdef _AIX
  /* AIX reports the release split across the version and release fields;
   * join them as "version.release". */
  r = snprintf(buffer->release,
               sizeof(buffer->release),
               "%s.%s",
               buf.version,
               buf.release);
  /* NOTE(review): signed r compared against unsigned sizeof(); a negative
   * snprintf() error value would also take this branch (reported as
   * UV_E2BIG) — confirm that is acceptable. */
  if (r >= sizeof(buffer->release)) {
    r = UV_E2BIG;
    goto error;
  }
#else
  r = uv__strscpy(buffer->release, buf.release, sizeof(buffer->release));
  if (r == UV_E2BIG)
    goto error;
#endif

  r = uv__strscpy(buffer->version, buf.version, sizeof(buffer->version));
  if (r == UV_E2BIG)
    goto error;

#if defined(_AIX) || defined(__PASE__)
  r = uv__strscpy(buffer->machine, "ppc64", sizeof(buffer->machine));
#else
  r = uv__strscpy(buffer->machine, buf.machine, sizeof(buffer->machine));
#endif

  if (r == UV_E2BIG)
    goto error;

  return 0;

error:
  /* Leave no partially filled fields behind. */
  buffer->sysname[0] = '\0';
  buffer->release[0] = '\0';
  buffer->version[0] = '\0';
  buffer->machine[0] = '\0';
  return r;
}
1764
/* Shared helper for socket-name queries: resolve |handle|'s fd and invoke
 * |func| (a getsockname/getpeername-shaped callback) on it.
 * |namelen| is in/out: buffer capacity on entry, actual length on return. */
int uv__getsockpeername(const uv_handle_t* handle,
                        uv__peersockfunc func,
                        struct sockaddr* name,
                        int* namelen) {
  socklen_t socklen;
  uv_os_fd_t fd;
  int r;

  r = uv_fileno(handle, &fd);
  if (r < 0)
    return r;

  /* sizeof(socklen_t) != sizeof(int) on some systems. */
  socklen = (socklen_t) *namelen;

  if (func(fd, name, &socklen))
    return UV__ERR(errno);

  *namelen = (int) socklen;
  return 0;
}
1786
/* Fetch the current wall-clock time into |tv| (seconds + microseconds). */
int uv_gettimeofday(uv_timeval64_t* tv) {
  struct timeval now;

  if (tv == NULL)
    return UV_EINVAL;

  if (gettimeofday(&now, NULL) != 0)
    return UV__ERR(errno);

  /* Widen/narrow explicitly into the fixed-width libuv struct fields. */
  tv->tv_sec = (int64_t) now.tv_sec;
  tv->tv_usec = (int32_t) now.tv_usec;
  return 0;
}
1800
/* Sleep for at least |msec| milliseconds, restarting nanosleep() whenever
 * it is interrupted by a signal. The remaining time is carried across
 * retries because nanosleep() updates its second argument.
 */
void uv_sleep(unsigned int msec) {
  struct timespec ts;
  int r;

  ts.tv_sec = msec / 1000;
  ts.tv_nsec = (long) (msec % 1000) * 1000000;

  do
    r = nanosleep(&ts, &ts);
  while (r == -1 && errno == EINTR);

  assert(r == 0);
}
1814
/* Resolve |prog| to an absolute executable path, written into |buf|.
 * |buflen| is in/out: capacity on entry, result length on return. The
 * result is silently truncated if the buffer is too small. Paths that
 * contain a '/' are resolved directly; bare names are searched in $PATH. */
int uv__search_path(const char* prog, char* buf, size_t* buflen) {
  char abspath[UV__PATH_MAX];
  size_t abspath_size;
  char trypath[UV__PATH_MAX];
  char* cloned_path;
  char* path_env;
  char* token;
  char* itr;

  if (buf == NULL || buflen == NULL || *buflen == 0)
    return UV_EINVAL;

  /*
   * Possibilities for prog:
   * i) an absolute path such as: /home/user/myprojects/nodejs/node
   * ii) a relative path such as: ./node or ../myprojects/nodejs/node
   * iii) a bare filename such as "node", after exporting PATH variable
   *     to its location.
   */

  /* Case i) and ii) absolute or relative paths */
  if (strchr(prog, '/') != NULL) {
    if (realpath(prog, abspath) != abspath)
      return UV__ERR(errno);

    abspath_size = strlen(abspath);

    /* Reserve room for the NUL, then clamp to the resolved length. */
    *buflen -= 1;
    if (*buflen > abspath_size)
      *buflen = abspath_size;

    memcpy(buf, abspath, *buflen);
    buf[*buflen] = '\0';

    return 0;
  }

  /* Case iii). Search PATH environment variable */
  cloned_path = NULL;
  token = NULL;
  path_env = getenv("PATH");

  if (path_env == NULL)
    return UV_EINVAL;

  /* uv__strtok() mutates its input, so work on a private copy of $PATH. */
  cloned_path = uv__strdup(path_env);
  if (cloned_path == NULL)
    return UV_ENOMEM;

  token = uv__strtok(cloned_path, ":", &itr);
  while (token != NULL) {
    snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, prog);
    if (realpath(trypath, abspath) == abspath) {
      /* Check the match is executable */
      if (access(abspath, X_OK) == 0) {
        abspath_size = strlen(abspath);

        *buflen -= 1;
        if (*buflen > abspath_size)
          *buflen = abspath_size;

        memcpy(buf, abspath, *buflen);
        buf[*buflen] = '\0';

        uv__free(cloned_path);
        return 0;
      }
    }
    token = uv__strtok(NULL, ":", &itr);
  }
  uv__free(cloned_path);

  /* Out of tokens (path entries), and no match found */
  return UV_EINVAL;
}
1890
1891
/* Best-effort count of CPUs available to this process; always >= 1. */
unsigned int uv_available_parallelism(void) {
#ifdef __linux__
  cpu_set_t set;
  long rc;

  memset(&set, 0, sizeof(set));

  /* sysconf(_SC_NPROCESSORS_ONLN) in musl calls sched_getaffinity() but in
   * glibc it's... complicated... so for consistency try sched_getaffinity()
   * before falling back to sysconf(_SC_NPROCESSORS_ONLN).
   */
  if (0 == sched_getaffinity(0, sizeof(set), &set))
    rc = CPU_COUNT(&set);
  else
    rc = sysconf(_SC_NPROCESSORS_ONLN);

  /* Clamp to at least one CPU on error or nonsense results. */
  if (rc < 1)
    rc = 1;

  return (unsigned) rc;
#elif defined(__MVS__)
  int rc;

  rc = __get_num_online_cpus();
  if (rc < 1)
    rc = 1;

  return (unsigned) rc;
#else  /* __linux__ */
  long rc;

  rc = sysconf(_SC_NPROCESSORS_ONLN);
  if (rc < 1)
    rc = 1;

  return (unsigned) rc;
#endif  /* __linux__ */
}
1930
/* Attach a post-task callback to |loop| by storing a magic-tagged pointer
 * to a heap-allocated uv_loop_data in loop->data. Returns 0 on success,
 * -1 on failure; always -1 on non-aarch64 builds.
 * NOTE(review): any existing loop->data is overwritten without being freed
 * here — presumably callers unregister first; verify against callers. */
int uv_register_task_to_event(struct uv_loop_s* loop, uv_post_task func, void* handler)
{
#if defined(__aarch64__)
  if (loop == NULL)
    return -1;

  struct uv_loop_data* data = (struct uv_loop_data*)malloc(sizeof(struct uv_loop_data));
  if (data == NULL)
    return -1;
  /* The tagging scheme requires the allocation's top bits to be clear. */
  if ((uint64_t)data >> UV_EVENT_MAGIC_OFFSETBITS != 0x0) {
    UV_LOGE("malloc address error");
    free(data);
    return -1;
  }

  (void)memset(data, 0, sizeof(struct uv_loop_data));
  data->post_task_func = func;
  data->event_handler = handler;
  /* Tag the pointer with the magic value so it can be validated later. */
  data = (struct uv_loop_data*)((uint64_t)data | (UV_EVENT_MAGIC_OFFSET << UV_EVENT_MAGIC_OFFSETBITS));
  loop->data = (void *)data;
  return 0;
#else
  return -1;
#endif
}
1956
1957
/* Undo uv_register_task_to_event(): validate the magic tag on loop->data,
 * strip it, free the allocation and clear loop->data. Returns 0 on
 * success, -1 when the loop or tag is invalid; always -1 off aarch64. */
int uv_unregister_task_to_event(struct uv_loop_s* loop)
{
#if defined(__aarch64__)
  if (loop == NULL || loop->data == NULL ||
      ((uint64_t)loop->data >> UV_EVENT_MAGIC_OFFSETBITS) != (uint64_t)(UV_EVENT_MAGIC_OFFSET))
    return -1;
  /* Subtract the magic tag to recover the real malloc()ed address. */
  loop->data = (struct uv_loop_data*)((uint64_t)loop->data -
      (UV_EVENT_MAGIC_OFFSET << UV_EVENT_MAGIC_OFFSETBITS));
  free(loop->data);
  loop->data = NULL;
  return 0;
#else
  return -1;
#endif
}
1973
1974
/* Check that |data| carries the registration magic tag and a non-NULL
 * post_task_func. Returns 0 when valid, -1 otherwise; always -1 on
 * non-aarch64 builds. */
int uv_check_data_valid(struct uv_loop_data* data) {
#if defined(__aarch64__)
  if (data == NULL || ((uint64_t)data >> UV_EVENT_MAGIC_OFFSETBITS) != (uint64_t)(UV_EVENT_MAGIC_OFFSET)) {
    return -1;
  }
  /* Strip the tag before dereferencing the real allocation. */
  struct uv_loop_data* addr = (struct uv_loop_data*)((uint64_t)data -
      (UV_EVENT_MAGIC_OFFSET << UV_EVENT_MAGIC_OFFSETBITS));
  if (addr->post_task_func == NULL) {
    UV_LOGE("post_task_func NULL");
    return -1;
  }
  return 0;
#else
  return -1;
#endif
}
1991
1992