Lines matching refs: handle (libuv, src/unix/udp.c)

43 static void uv__udp_run_completed(uv_udp_t* handle);
45 static void uv__udp_recvmsg(uv_udp_t* handle);
46 static void uv__udp_sendmsg(uv_udp_t* handle);
47 static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
52 void uv__udp_close(uv_udp_t* handle) {
53 uv__io_close(handle->loop, &handle->io_watcher);
54 uv__handle_stop(handle);
56 if (handle->io_watcher.fd != -1) {
57 uv__close(handle->io_watcher.fd);
58 handle->io_watcher.fd = -1;
63 void uv__udp_finish_close(uv_udp_t* handle) {
67 assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
68 assert(handle->io_watcher.fd == -1);
70 while (!uv__queue_empty(&handle->write_queue)) {
71 q = uv__queue_head(&handle->write_queue);
76 uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
79 uv__udp_run_completed(handle);
81 assert(handle->send_queue_size == 0);
82 assert(handle->send_queue_count == 0);
84 /* Now tear down the handle. */
85 handle->recv_cb = NULL;
86 handle->alloc_cb = NULL;
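
At teardown, every request still sitting in the write queue is moved to the completed queue and finished with UV_ECANCELED before recv_cb/alloc_cb are cleared. A minimal sketch of the user-facing side (callback names are illustrative):

    #include <stdlib.h>
    #include <uv.h>

    static void on_send(uv_udp_send_t* req, int status) {
      /* status is UV_ECANCELED when the handle was closed while this
       * send was still queued; it is not a network failure. */
      free(req);
      (void) status;
    }

    static void on_close(uv_handle_t* h) {
      free(h);  /* all pending send callbacks have already fired */
    }

    static void shutdown_udp(uv_udp_t* udp) {
      uv_close((uv_handle_t*) udp, on_close);
    }
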
91 static void uv__udp_run_completed(uv_udp_t* handle) {
95 assert(!(handle->flags & UV_HANDLE_UDP_PROCESSING));
96 handle->flags |= UV_HANDLE_UDP_PROCESSING;
98 while (!uv__queue_empty(&handle->write_completed_queue)) {
99 q = uv__queue_head(&handle->write_completed_queue);
103 uv__req_unregister(handle->loop, req);
105 handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
106 handle->send_queue_count--;
124 if (uv__queue_empty(&handle->write_queue)) {
126 uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT);
127 if (!uv__io_active(&handle->io_watcher, POLLIN))
128 uv__handle_stop(handle);
131 handle->flags &= ~UV_HANDLE_UDP_PROCESSING;
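
The send_queue_size/send_queue_count bookkeeping updated here is what the public getters report, so the outstanding backlog is observable. A small sketch (function name is illustrative):

    #include <stdio.h>
    #include <uv.h>

    /* The counters drained by uv__udp_run_completed() back these getters. */
    static void report_backlog(const uv_udp_t* udp) {
      printf("queued bytes: %zu, queued datagrams: %zu\n",
             uv_udp_get_send_queue_size(udp),
             uv_udp_get_send_queue_count(udp));
    }
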
136 uv_udp_t* handle;
138 handle = container_of(w, uv_udp_t, io_watcher);
139 assert(handle->type == UV_UDP);
142 uv__udp_recvmsg(handle);
145 uv__udp_sendmsg(handle);
146 uv__udp_run_completed(handle);
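
uv__udp_io recovers the uv_udp_t from the embedded io_watcher with container_of before dispatching reads and writes. A sketch of that pattern with illustrative struct names (not libuv's real definitions):

    #include <stddef.h>

    /* Recover the enclosing struct from a pointer to one of its members. */
    #define container_of(ptr, type, member) \
      ((type*) ((char*) (ptr) - offsetof(type, member)))

    struct watcher { int fd; };
    struct handle  { int type; struct watcher io_watcher; };

    static struct handle* from_watcher(struct watcher* w) {
      return container_of(w, struct handle, io_watcher);
    }
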
150 static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
179 nread = recvmmsg(handle->io_watcher.fd, msgs, chunks, 0, NULL);
184 handle->recv_cb(handle, 0, buf, NULL, 0);
186 handle->recv_cb(handle, UV__ERR(errno), buf, NULL, 0);
189 for (k = 0; k < (size_t) nread && handle->recv_cb != NULL; k++) {
195 handle->recv_cb(handle,
203 if (handle->recv_cb != NULL)
204 handle->recv_cb(handle, 0, buf, NULL, UV_UDP_MMSG_FREE);
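
Batched receives are opt-in via uv_udp_init_ex with the UV_UDP_RECVMMSG flag; recv_cb then fires once per datagram with UV_UDP_MMSG_CHUNK set, and a final call with UV_UDP_MMSG_FREE signals that the shared buffer may be released. A sketch, assuming a libuv build with recvmmsg support:

    #include <stdlib.h>
    #include <uv.h>

    static void on_alloc(uv_handle_t* h, size_t suggested, uv_buf_t* buf) {
      /* One large allocation can carry several datagrams. */
      *buf = uv_buf_init(malloc(suggested), (unsigned int) suggested);
      (void) h;
    }

    static void on_read(uv_udp_t* udp, ssize_t nread, const uv_buf_t* buf,
                        const struct sockaddr* addr, unsigned flags) {
      if (nread > 0) {
        /* process nread bytes at buf->base */
      }
      if (!(flags & UV_UDP_MMSG_CHUNK))
        free(buf->base);  /* chunks alias one buffer; free only on the last call */
      (void) udp; (void) addr;
    }

    static int start(uv_loop_t* loop, uv_udp_t* udp) {
      int err = uv_udp_init_ex(loop, udp, AF_INET | UV_UDP_RECVMMSG);
      if (err == 0)
        err = uv_udp_recv_start(udp, on_alloc, on_read);
      return err;
    }
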
212 static void uv__udp_recvmsg(uv_udp_t* handle) {
220 assert(handle->recv_cb != NULL);
221 assert(handle->alloc_cb != NULL);
230 handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
232 handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
237 if (uv_udp_using_recvmmsg(handle)) {
238 nread = uv__udp_recvmmsg(handle, &buf);
252 nread = recvmsg(handle->io_watcher.fd, &h, 0);
258 handle->recv_cb(handle, 0, &buf, NULL, 0);
260 handle->recv_cb(handle, UV__ERR(errno), &buf, NULL, 0);
267 handle->recv_cb(handle, nread, &buf, (const struct sockaddr*) &peer, flags);
271 /* recv_cb callback may decide to pause or close the handle */
274 && handle->io_watcher.fd != -1
275 && handle->recv_cb != NULL);
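
If alloc_cb yields no buffer (NULL base or zero length), the read is skipped and recv_cb fires once with UV_ENOBUFS; the loop also stops as soon as the callback closes the handle or stops receiving. Handling that path might look like:

    #include <uv.h>

    static void on_read(uv_udp_t* udp, ssize_t nread, const uv_buf_t* buf,
                        const struct sockaddr* addr, unsigned flags) {
      if (nread == UV_ENOBUFS) {
        /* the allocator is out of memory; stop reading rather than spin */
        uv_udp_recv_stop(udp);
        return;
      }
      (void) buf; (void) addr; (void) flags;
    }
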
278 static void uv__udp_sendmsg(uv_udp_t* handle) {
288 if (uv__queue_empty(&handle->write_queue))
292 for (pkts = 0, q = uv__queue_head(&handle->write_queue);
293 pkts < ARRAY_SIZE(h) && q != &handle->write_queue;
322 npkts = sendmmsg(handle->io_watcher.fd, h, pkts, 0);
328 for (i = 0, q = uv__queue_head(&handle->write_queue);
329 i < pkts && q != &handle->write_queue;
330 ++i, q = uv__queue_head(&handle->write_queue)) {
337 uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
339 uv__io_feed(handle->loop, &handle->io_watcher);
346 for (i = 0, q = uv__queue_head(&handle->write_queue);
347 i < (size_t)npkts && q != &handle->write_queue;
348 ++i, q = uv__queue_head(&handle->write_queue)) {
357 * why we don't handle partial writes. Just pop the request
361 uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
365 if (!uv__queue_empty(&handle->write_queue))
367 uv__io_feed(handle->loop, &handle->io_watcher);
374 while (!uv__queue_empty(&handle->write_queue)) {
375 q = uv__queue_head(&handle->write_queue);
402 size = sendmsg(handle->io_watcher.fd, &h, 0);
414 * why we don't handle partial writes. Just pop the request
418 uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
419 uv__io_feed(handle->loop, &handle->io_watcher);
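
On Linux the write queue is flushed in batches with sendmmsg(2), falling back to one sendmsg(2) per datagram elsewhere; since a UDP datagram goes out whole or not at all, partial writes never need handling. A raw-syscall sketch of the batching idea (not libuv code):

    #define _GNU_SOURCE
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Send two datagrams with one syscall; the return value is the
     * number of messages sent, each one atomic. */
    static int send_two(int fd, struct iovec* a, struct iovec* b) {
      struct mmsghdr msgs[2];
      memset(msgs, 0, sizeof(msgs));
      msgs[0].msg_hdr.msg_iov = a;
      msgs[0].msg_hdr.msg_iovlen = 1;
      msgs[1].msg_hdr.msg_iov = b;
      msgs[1].msg_hdr.msg_iovlen = 1;
      return sendmmsg(fd, msgs, 2, 0);
    }
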
486 int uv__udp_bind(uv_udp_t* handle,
502 fd = handle->io_watcher.fd;
508 handle->io_watcher.fd = fd;
546 handle->flags |= UV_HANDLE_IPV6;
548 handle->flags |= UV_HANDLE_BOUND;
553 static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
559 if (handle->io_watcher.fd != -1)
586 return uv__udp_bind(handle, &taddr.addr, addrlen, flags);
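
uv__udp_maybe_deferred_bind gives send, connect, and membership calls an implicit wildcard bind when the user never bound explicitly. Binding through the public API instead, as a sketch:

    #include <uv.h>

    static int bind_any(uv_udp_t* udp, int port) {
      struct sockaddr_in addr;
      int err = uv_ip4_addr("0.0.0.0", port, &addr);
      if (err)
        return err;
      /* UV_UDP_REUSEADDR matches the flag the deferred bind passes
       * for multicast membership. */
      return uv_udp_bind(udp, (const struct sockaddr*) &addr, UV_UDP_REUSEADDR);
    }
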
590 int uv__udp_connect(uv_udp_t* handle,
595 err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
601 err = connect(handle->io_watcher.fd, addr, addrlen);
607 handle->flags |= UV_HANDLE_UDP_CONNECTED;
643 int uv__udp_disconnect(uv_udp_t* handle) {
667 r = connect(handle->io_watcher.fd, (struct sockaddr*) NULL, 0);
669 r = connect(handle->io_watcher.fd, (struct sockaddr*) &addr, sizeof(addr));
682 handle->flags &= ~UV_HANDLE_UDP_CONNECTED;
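
A connected handle remembers its peer and rejects per-send addresses; passing NULL to uv_udp_connect disconnects, which is the AF_UNSPEC connect() seen above. A re-targeting sketch:

    #include <uv.h>

    static int reconnect(uv_udp_t* udp, const struct sockaddr* new_peer) {
      int err = uv_udp_connect(udp, NULL);   /* drop the old peer (AF_UNSPEC) */
      if (err && err != UV_ENOTCONN)         /* ENOTCONN: was never connected */
        return err;
      return uv_udp_connect(udp, new_peer);  /* connect to the new one */
    }
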
687 uv_udp_t* handle,
699 err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
708 empty_queue = (handle->send_queue_count == 0);
710 uv__req_init(handle->loop, req, UV_UDP_SEND);
717 req->handle = handle;
725 uv__req_unregister(handle->loop, req);
730 handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
731 handle->send_queue_count++;
732 uv__queue_insert_tail(&handle->write_queue, &req->queue);
733 uv__handle_start(handle);
735 if (empty_queue && !(handle->flags & UV_HANDLE_UDP_PROCESSING)) {
736 uv__udp_sendmsg(handle);
742 if (!uv__queue_empty(&handle->write_queue))
743 uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
745 uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
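
uv__udp_send backs the public uv_udp_send(): the request is registered and queued, flushed inline when the queue was empty, and otherwise left for POLLOUT to drive. Usage sketch (the buffer must stay valid until the callback fires):

    #include <stdlib.h>
    #include <uv.h>

    static void on_send(uv_udp_send_t* req, int status) {
      free(req);  /* status < 0 on failure, e.g. UV_ECANCELED at close */
      (void) status;
    }

    static int send_hello(uv_udp_t* udp, const struct sockaddr* dest) {
      uv_udp_send_t* req = malloc(sizeof(*req));
      uv_buf_t buf = uv_buf_init("hello", 5);  /* literal outlives the send */
      if (req == NULL)
        return UV_ENOMEM;
      return uv_udp_send(req, udp, &buf, 1, dest, on_send);
    }
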
752 int uv__udp_try_send(uv_udp_t* handle,
764 if (handle->send_queue_count != 0)
768 err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
772 assert(handle->flags & UV_HANDLE_UDP_CONNECTED);
782 size = sendmsg(handle->io_watcher.fd, &h, 0);
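
uv_udp_try_send bails out with UV_EAGAIN whenever requests are already queued, preserving datagram order; callers fall back to uv_udp_send in that case. Sketch:

    #include <uv.h>

    static int try_first(uv_udp_t* udp, uv_buf_t* buf,
                         const struct sockaddr* dest) {
      int n = uv_udp_try_send(udp, buf, 1, dest);
      if (n >= 0)
        return 0;        /* sent synchronously, n bytes */
      if (n == UV_EAGAIN)
        return 1;        /* queue is busy: use uv_udp_send() instead */
      return n;          /* hard error */
    }
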
796 static int uv__udp_set_membership4(uv_udp_t* handle,
827 if (setsockopt(handle->io_watcher.fd,
843 static int uv__udp_set_membership6(uv_udp_t* handle,
874 if (setsockopt(handle->io_watcher.fd,
896 static int uv__udp_set_source_membership4(uv_udp_t* handle,
905 err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
929 if (setsockopt(handle->io_watcher.fd,
941 static int uv__udp_set_source_membership6(uv_udp_t* handle,
951 err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
978 if (setsockopt(handle->io_watcher.fd,
992 uv_udp_t* handle,
1004 uv__handle_init(loop, (uv_handle_t*)handle, UV_UDP);
1005 handle->alloc_cb = NULL;
1006 handle->recv_cb = NULL;
1007 handle->send_queue_size = 0;
1008 handle->send_queue_count = 0;
1009 uv__io_init(&handle->io_watcher, uv__udp_io, fd);
1010 uv__queue_init(&handle->write_queue);
1011 uv__queue_init(&handle->write_completed_queue);
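
uv__udp_init_ex leaves the handle with no callbacks, empty queues, and (unless a domain was requested) no socket yet; the fd is created lazily by the first bind, send, or receive. Minimal setup, sketched:

    #include <uv.h>

    static int make_udp(uv_loop_t* loop, uv_udp_t* udp) {
      /* No socket exists yet; uv__udp_maybe_deferred_bind creates it
       * on first use. */
      return uv_udp_init(loop, udp);
    }
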
1017 int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
1019 if (handle->flags & UV_HANDLE_UDP_RECVMMSG)
1026 int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
1030 if (handle->io_watcher.fd != -1)
1033 if (uv__fd_exists(handle->loop, sock))
1044 handle->io_watcher.fd = sock;
1045 if (uv__udp_is_connected(handle))
1046 handle->flags |= UV_HANDLE_UDP_CONNECTED;
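
uv_udp_open adopts an externally created socket, refusing a second open once an fd exists and marking the handle connected when the socket already has a peer. Sketch, assuming a POSIX platform:

    #include <errno.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include <uv.h>

    static int adopt_socket(uv_loop_t* loop, uv_udp_t* udp) {
      int err;
      int fd = socket(AF_INET, SOCK_DGRAM, 0);
      if (fd == -1)
        return uv_translate_sys_error(errno);
      err = uv_udp_init(loop, udp);
      if (err == 0)
        err = uv_udp_open(udp, fd);  /* a second open fails: fd != -1 above */
      if (err)
        close(fd);
      return err;
    }
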
1052 int uv_udp_set_membership(uv_udp_t* handle,
1061 err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
1064 return uv__udp_set_membership4(handle, &addr4, interface_addr, membership);
1066 err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
1069 return uv__udp_set_membership6(handle, &addr6, interface_addr, membership);
1076 int uv_udp_set_source_membership(uv_udp_t* handle,
1099 return uv__udp_set_source_membership6(handle,
1109 return uv__udp_set_source_membership4(handle,
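
Both membership entry points bind first with UV_UDP_REUSEADDR (the deferred bind above), so multiple receivers can share a group and port. Joining an IPv4 group, sketched with an illustrative address:

    #include <uv.h>

    static int join_group(uv_udp_t* udp) {
      /* NULL interface_addr lets the OS pick the interface. */
      return uv_udp_set_membership(udp, "239.255.0.1", NULL, UV_JOIN_GROUP);
    }
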
1120 static int uv__setsockopt(uv_udp_t* handle,
1127 if (handle->flags & UV_HANDLE_IPV6)
1128 r = setsockopt(handle->io_watcher.fd,
1134 r = setsockopt(handle->io_watcher.fd,
1145 static int uv__setsockopt_maybe_char(uv_udp_t* handle,
1160 return uv__setsockopt(handle, option4, option6, &arg, sizeof(arg));
1164 int uv_udp_set_broadcast(uv_udp_t* handle, int on) {
1165 if (setsockopt(handle->io_watcher.fd,
1177 int uv_udp_set_ttl(uv_udp_t* handle, int ttl) {
1182 if (!(handle->flags & UV_HANDLE_IPV6))
1195 return uv__setsockopt(handle,
1204 return uv__setsockopt_maybe_char(handle,
1214 int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) {
1223 if (handle->flags & UV_HANDLE_IPV6)
1224 return uv__setsockopt(handle,
1232 return uv__setsockopt_maybe_char(handle,
1239 int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) {
1248 if (handle->flags & UV_HANDLE_IPV6)
1249 return uv__setsockopt(handle,
1257 return uv__setsockopt_maybe_char(handle,
1263 int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) {
1273 if (handle->flags & UV_HANDLE_IPV6) {
1289 if (setsockopt(handle->io_watcher.fd,
1297 if (setsockopt(handle->io_watcher.fd,
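
The setters above choose the IPPROTO_IP or IPPROTO_IPV6 option level from UV_HANDLE_IPV6, retrying with a char-sized argument where platforms require it. Typical public-API usage, sketched:

    #include <uv.h>

    static int tune(uv_udp_t* udp) {
      int err;
      if ((err = uv_udp_set_broadcast(udp, 1)))
        return err;
      if ((err = uv_udp_set_ttl(udp, 64)))           /* valid range 1..255 */
        return err;
      if ((err = uv_udp_set_multicast_ttl(udp, 2)))  /* hop past the local net */
        return err;
      return uv_udp_set_multicast_loop(udp, 0);      /* don't echo own packets */
    }
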
1312 int uv_udp_getpeername(const uv_udp_t* handle,
1316 return uv__getsockpeername((const uv_handle_t*) handle,
1322 int uv_udp_getsockname(const uv_udp_t* handle,
1326 return uv__getsockpeername((const uv_handle_t*) handle,
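
Both getters delegate to uv__getsockpeername. Reading back an OS-assigned port after binding to port 0, sketched:

    #include <arpa/inet.h>
    #include <stdio.h>
    #include <uv.h>

    static int print_local_port(const uv_udp_t* udp) {
      struct sockaddr_storage ss;
      int len = sizeof(ss);
      int err = uv_udp_getsockname(udp, (struct sockaddr*) &ss, &len);
      if (err == 0 && ss.ss_family == AF_INET)
        printf("bound port: %d\n",
               ntohs(((struct sockaddr_in*) &ss)->sin_port));
      return err;
    }
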
1333 int uv__udp_recv_start(uv_udp_t* handle,
1341 if (uv__io_active(&handle->io_watcher, POLLIN))
1344 err = uv__udp_maybe_deferred_bind(handle, AF_INET, 0);
1348 handle->alloc_cb = alloc_cb;
1349 handle->recv_cb = recv_cb;
1351 uv__io_start(handle->loop, &handle->io_watcher, POLLIN);
1352 uv__handle_start(handle);
1358 int uv__udp_recv_stop(uv_udp_t* handle) {
1359 uv__io_stop(handle->loop, &handle->io_watcher, POLLIN);
1361 if (!uv__io_active(&handle->io_watcher, POLLOUT))
1362 uv__handle_stop(handle);
1364 handle->alloc_cb = NULL;
1365 handle->recv_cb = NULL;
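
recv_start performs the deferred wildcard bind if needed, installs the callbacks, and arms POLLIN; recv_stop disarms it and clears them again. An end-to-end receive sketch tying these together (callback names are illustrative):

    #include <stdlib.h>
    #include <uv.h>

    static void on_alloc(uv_handle_t* h, size_t suggested, uv_buf_t* buf) {
      *buf = uv_buf_init(malloc(suggested), (unsigned int) suggested);
      (void) h;
    }

    static void on_read(uv_udp_t* udp, ssize_t nread, const uv_buf_t* buf,
                        const struct sockaddr* addr, unsigned flags) {
      if (nread < 0)
        uv_udp_recv_stop(udp);  /* clears alloc_cb/recv_cb as shown above */
      free(buf->base);          /* free(NULL) is harmless */
      (void) addr; (void) flags;
    }

    static int listen_udp(uv_udp_t* udp) {
      /* Implicitly binds 0.0.0.0:0 if the handle was never bound. */
      return uv_udp_recv_start(udp, on_alloc, on_read);
    }
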