/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <io.h>

#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "req-inl.h"


/* Provider GUIDs of the MSAFD winsock providers. Only sockets backed by one
 * of these providers can use the fast (AFD-based) poll mechanism. */
static const GUID uv_msafd_provider_ids[UV_MSAFD_PROVIDER_COUNT] = {
  {0xe70f1aa0, 0xab8b, 0x11cf,
      {0x8c, 0xa3, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}},
  {0xf9eab0c0, 0x26d4, 0x11d0,
      {0xbb, 0xbf, 0x00, 0xaa, 0x00, 0x6c, 0x34, 0xe4}},
  {0x9fc48064, 0x7298, 0x43e4,
      {0xb7, 0xbd, 0x18, 0x1f, 0x20, 0x89, 0x79, 0x2a}},
  {0xa00943d9, 0x9c2e, 0x4633,
      {0x9b, 0x59, 0x00, 0x57, 0xa3, 0x16, 0x09, 0x94}}
};

typedef struct uv_single_fd_set_s {
  unsigned int fd_count;
  SOCKET fd_array[1];
} uv_single_fd_set_t;


static OVERLAPPED overlapped_dummy_;
static uv_once_t overlapped_dummy_init_guard_ = UV_ONCE_INIT;

static AFD_POLL_INFO afd_poll_info_dummy_;


static void uv__init_overlapped_dummy(void) {
  HANDLE event;

  event = CreateEvent(NULL, TRUE, TRUE, NULL);
  if (event == NULL)
    uv_fatal_error(GetLastError(), "CreateEvent");

  memset(&overlapped_dummy_, 0, sizeof overlapped_dummy_);
  /* Setting the low-order bit of the event handle tells the kernel not to
   * queue a completion packet to the completion port for operations that use
   * this OVERLAPPED; we only care about the operation's side effects, never
   * about its completion. */
  overlapped_dummy_.hEvent = (HANDLE) ((uintptr_t) event | 1);
}


static OVERLAPPED* uv__get_overlapped_dummy(void) {
  uv_once(&overlapped_dummy_init_guard_, uv__init_overlapped_dummy);
  return &overlapped_dummy_;
}


static AFD_POLL_INFO* uv__get_afd_poll_info_dummy(void) {
  return &afd_poll_info_dummy_;
}


static void uv__fast_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
  uv_req_t* req;
  AFD_POLL_INFO* afd_poll_info;
  int result;

  /* Find a yet unsubmitted req to submit. */
  if (handle->submitted_events_1 == 0) {
    req = &handle->poll_req_1;
    afd_poll_info = &handle->afd_poll_info_1;
    handle->submitted_events_1 = handle->events;
    handle->mask_events_1 = 0;
    handle->mask_events_2 = handle->events;
  } else if (handle->submitted_events_2 == 0) {
    req = &handle->poll_req_2;
    afd_poll_info = &handle->afd_poll_info_2;
    handle->submitted_events_2 = handle->events;
    handle->mask_events_1 = handle->events;
    handle->mask_events_2 = 0;
  } else {
    /* Just wait until there's an unsubmitted req. This will happen almost
     * immediately as one of the 2 outstanding requests is about to return.
     * When this happens, uv__fast_poll_process_poll_req will be called, and
     * the pending events, if needed, will be processed in a subsequent
     * request. */
    return;
  }
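
  /* Note: mask_events_1 and mask_events_2 record which events the freshly
   * submitted request is already watching. When the other, older request
   * completes, those events are masked out so they are not reported twice
   * (see uv__fast_poll_process_poll_req below). */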

  /* Setting Exclusive to TRUE makes any other outstanding poll request on the
   * same socket return. */
  afd_poll_info->Exclusive = TRUE;
  afd_poll_info->NumberOfHandles = 1;
  afd_poll_info->Timeout.QuadPart = INT64_MAX;
  afd_poll_info->Handles[0].Handle = (HANDLE) handle->socket;
  afd_poll_info->Handles[0].Status = 0;
  afd_poll_info->Handles[0].Events = 0;

  if (handle->events & UV_READABLE) {
    afd_poll_info->Handles[0].Events |= AFD_POLL_RECEIVE |
        AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT;
  } else {
    if (handle->events & UV_DISCONNECT) {
      afd_poll_info->Handles[0].Events |= AFD_POLL_DISCONNECT;
    }
  }
  if (handle->events & UV_WRITABLE) {
    afd_poll_info->Handles[0].Events |= AFD_POLL_SEND | AFD_POLL_CONNECT_FAIL;
  }

  memset(&req->u.io.overlapped, 0, sizeof req->u.io.overlapped);

  result = uv__msafd_poll((SOCKET) handle->peer_socket,
                          afd_poll_info,
                          afd_poll_info,
                          &req->u.io.overlapped);
  if (result != 0 && WSAGetLastError() != WSA_IO_PENDING) {
    /* Queue this req, reporting an error. */
    SET_REQ_ERROR(req, WSAGetLastError());
    uv__insert_pending_req(loop, req);
  }
}


static void uv__fast_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
    uv_req_t* req) {
  unsigned char mask_events;
  AFD_POLL_INFO* afd_poll_info;

  if (req == &handle->poll_req_1) {
    afd_poll_info = &handle->afd_poll_info_1;
    handle->submitted_events_1 = 0;
    mask_events = handle->mask_events_1;
  } else if (req == &handle->poll_req_2) {
    afd_poll_info = &handle->afd_poll_info_2;
    handle->submitted_events_2 = 0;
    mask_events = handle->mask_events_2;
  } else {
    assert(0);
    return;
  }

  /* Report an error unless the poll request was just interrupted. */
  if (!REQ_SUCCESS(req)) {
    DWORD error = GET_REQ_SOCK_ERROR(req);
    if (error != WSAEINTR && handle->events != 0) {
      handle->events = 0; /* Stop the watcher */
      handle->poll_cb(handle, uv_translate_sys_error(error), 0);
    }

  } else if (afd_poll_info->NumberOfHandles >= 1) {
    unsigned char events = 0;

    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_RECEIVE |
        AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT)) != 0) {
      events |= UV_READABLE;
      if ((afd_poll_info->Handles[0].Events & AFD_POLL_DISCONNECT) != 0) {
        events |= UV_DISCONNECT;
      }
    }
    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_SEND |
        AFD_POLL_CONNECT_FAIL)) != 0) {
      events |= UV_WRITABLE;
    }

    events &= handle->events & ~mask_events;

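    /* AFD_POLL_LOCAL_CLOSE is reported when the socket handle was closed
     * locally (e.g. with closesocket), so there is nothing left to watch. */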
    if (afd_poll_info->Handles[0].Events & AFD_POLL_LOCAL_CLOSE) {
      /* Stop polling. */
      handle->events = 0;
      if (uv__is_active(handle))
        uv__handle_stop(handle);
    }

    if (events != 0) {
      handle->poll_cb(handle, 0, events);
    }
  }

  if ((handle->events & ~(handle->submitted_events_1 |
      handle->submitted_events_2)) != 0) {
    uv__fast_poll_submit_poll_req(loop, handle);
  } else if ((handle->flags & UV_HANDLE_CLOSING) &&
             handle->submitted_events_1 == 0 &&
             handle->submitted_events_2 == 0) {
    uv__want_endgame(loop, (uv_handle_t*) handle);
  }
}


static SOCKET uv__fast_poll_create_peer_socket(HANDLE iocp,
    WSAPROTOCOL_INFOW* protocol_info) {
  SOCKET sock = 0;

  sock = WSASocketW(protocol_info->iAddressFamily,
                    protocol_info->iSocketType,
                    protocol_info->iProtocol,
                    protocol_info,
                    0,
                    WSA_FLAG_OVERLAPPED);
  if (sock == INVALID_SOCKET) {
    return INVALID_SOCKET;
  }

  if (!SetHandleInformation((HANDLE) sock, HANDLE_FLAG_INHERIT, 0)) {
    goto error;
  }

  if (CreateIoCompletionPort((HANDLE) sock,
                             iocp,
                             (ULONG_PTR) sock,
                             0) == NULL) {
    goto error;
  }

  return sock;

 error:
  closesocket(sock);
  return INVALID_SOCKET;
}


static SOCKET uv__fast_poll_get_peer_socket(uv_loop_t* loop,
    WSAPROTOCOL_INFOW* protocol_info) {
  int index, i;
  SOCKET peer_socket;

  index = -1;
  for (i = 0; (size_t) i < ARRAY_SIZE(uv_msafd_provider_ids); i++) {
    if (memcmp((void*) &protocol_info->ProviderId,
               (void*) &uv_msafd_provider_ids[i],
               sizeof protocol_info->ProviderId) == 0) {
      index = i;
      break;
    }
  }

  /* Check if the protocol uses an msafd socket. */
  if (index < 0) {
    return INVALID_SOCKET;
  }

  /* If we didn't (try to) create a peer socket yet, try to make one. Don't
   * try again if peer socket creation failed earlier for the same protocol. */
  peer_socket = loop->poll_peer_sockets[index];
  if (peer_socket == 0) {
    peer_socket = uv__fast_poll_create_peer_socket(loop->iocp, protocol_info);
    loop->poll_peer_sockets[index] = peer_socket;
  }

  return peer_socket;
}


static DWORD WINAPI uv__slow_poll_thread_proc(void* arg) {
  uv_req_t* req = (uv_req_t*) arg;
  uv_poll_t* handle = (uv_poll_t*) req->data;
  unsigned char reported_events;
  int r;
  uv_single_fd_set_t rfds, wfds, efds;
  struct timeval timeout;

  assert(handle->type == UV_POLL);
  assert(req->type == UV_POLL_REQ);

  if (handle->events & UV_READABLE) {
    rfds.fd_count = 1;
    rfds.fd_array[0] = handle->socket;
  } else {
    rfds.fd_count = 0;
  }

  if (handle->events & UV_WRITABLE) {
    wfds.fd_count = 1;
    wfds.fd_array[0] = handle->socket;
    efds.fd_count = 1;
    efds.fd_array[0] = handle->socket;
  } else {
    wfds.fd_count = 0;
    efds.fd_count = 0;
  }

  /* Make the select() time out after 3 minutes. If select() hangs because the
   * user closed the socket, we will at least not hang indefinitely. */
  timeout.tv_sec = 3 * 60;
  timeout.tv_usec = 0;

  r = select(1, (fd_set*) &rfds, (fd_set*) &wfds, (fd_set*) &efds, &timeout);
  if (r == SOCKET_ERROR) {
    /* Queue this req, reporting an error. */
    SET_REQ_ERROR(req, WSAGetLastError());
    POST_COMPLETION_FOR_REQ(handle->loop, req);
    return 0;
  }

  reported_events = 0;

  if (r > 0) {
    if (rfds.fd_count > 0) {
      assert(rfds.fd_count == 1);
      assert(rfds.fd_array[0] == handle->socket);
      reported_events |= UV_READABLE;
    }

    if (wfds.fd_count > 0) {
      assert(wfds.fd_count == 1);
      assert(wfds.fd_array[0] == handle->socket);
      reported_events |= UV_WRITABLE;
    } else if (efds.fd_count > 0) {
      assert(efds.fd_count == 1);
      assert(efds.fd_array[0] == handle->socket);
      reported_events |= UV_WRITABLE;
    }
  }

  SET_REQ_SUCCESS(req);
  req->u.io.overlapped.InternalHigh = (DWORD) reported_events;
  POST_COMPLETION_FOR_REQ(handle->loop, req);

  return 0;
}


static void uv__slow_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
  uv_req_t* req;

  /* Find a yet unsubmitted req to submit. */
  if (handle->submitted_events_1 == 0) {
    req = &handle->poll_req_1;
    handle->submitted_events_1 = handle->events;
    handle->mask_events_1 = 0;
    handle->mask_events_2 = handle->events;
  } else if (handle->submitted_events_2 == 0) {
    req = &handle->poll_req_2;
    handle->submitted_events_2 = handle->events;
    handle->mask_events_1 = handle->events;
    handle->mask_events_2 = 0;
  } else {
    assert(0);
    return;
  }

  if (!QueueUserWorkItem(uv__slow_poll_thread_proc,
                         (void*) req,
                         WT_EXECUTELONGFUNCTION)) {
    /* Make this req pending, reporting an error. */
    SET_REQ_ERROR(req, GetLastError());
    uv__insert_pending_req(loop, req);
  }
}


static void uv__slow_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
    uv_req_t* req) {
  unsigned char mask_events;
  int err;

  if (req == &handle->poll_req_1) {
    handle->submitted_events_1 = 0;
    mask_events = handle->mask_events_1;
  } else if (req == &handle->poll_req_2) {
    handle->submitted_events_2 = 0;
    mask_events = handle->mask_events_2;
  } else {
    assert(0);
    return;
  }

  if (!REQ_SUCCESS(req)) {
    /* Error. */
    if (handle->events != 0) {
      err = GET_REQ_ERROR(req);
      handle->events = 0; /* Stop the watcher */
      handle->poll_cb(handle, uv_translate_sys_error(err), 0);
    }
  } else {
    /* Got some events. */
    int events =
        req->u.io.overlapped.InternalHigh & handle->events & ~mask_events;
    if (events != 0) {
      handle->poll_cb(handle, 0, events);
    }
  }

  if ((handle->events & ~(handle->submitted_events_1 |
      handle->submitted_events_2)) != 0) {
    uv__slow_poll_submit_poll_req(loop, handle);
  } else if ((handle->flags & UV_HANDLE_CLOSING) &&
             handle->submitted_events_1 == 0 &&
             handle->submitted_events_2 == 0) {
    uv__want_endgame(loop, (uv_handle_t*) handle);
  }
}


int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
  return uv_poll_init_socket(loop, handle, (SOCKET) uv__get_osfhandle(fd));
}


int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
    uv_os_sock_t socket) {
  WSAPROTOCOL_INFOW protocol_info;
  int len;
  SOCKET peer_socket, base_socket;
  DWORD bytes;
  DWORD yes = 1;

  /* Set the socket to nonblocking mode. */
  if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR)
    return uv_translate_sys_error(WSAGetLastError());

/* Try to obtain a base handle for the socket. This increases the chances that
 * we find an AFD handle and are able to use the fast poll mechanism. */
#ifndef NDEBUG
  base_socket = INVALID_SOCKET;
#endif

  if (WSAIoctl(socket,
               SIO_BASE_HANDLE,
               NULL,
               0,
               &base_socket,
               sizeof base_socket,
               &bytes,
               NULL,
               NULL) == 0) {
    assert(base_socket != 0 && base_socket != INVALID_SOCKET);
    socket = base_socket;
  }

  uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL);
  handle->socket = socket;
  handle->events = 0;

  /* Obtain protocol information about the socket. */
  len = sizeof protocol_info;
  if (getsockopt(socket,
                 SOL_SOCKET,
                 SO_PROTOCOL_INFOW,
                 (char*) &protocol_info,
                 &len) != 0) {
    return uv_translate_sys_error(WSAGetLastError());
  }

  /* Get the peer socket that is needed to enable fast poll. If the returned
   * value is INVALID_SOCKET, the protocol is not implemented by MSAFD and
   * we'll have to use slow mode. */
  peer_socket = uv__fast_poll_get_peer_socket(loop, &protocol_info);

  if (peer_socket != INVALID_SOCKET) {
    /* Initialize fast poll specific fields. */
    handle->peer_socket = peer_socket;
  } else {
    /* Initialize slow poll specific fields. */
    handle->flags |= UV_HANDLE_POLL_SLOW;
  }

  /* Initialize 2 poll reqs. */
  handle->submitted_events_1 = 0;
  UV_REQ_INIT(&handle->poll_req_1, UV_POLL_REQ);
  handle->poll_req_1.data = handle;

  handle->submitted_events_2 = 0;
  UV_REQ_INIT(&handle->poll_req_2, UV_POLL_REQ);
  handle->poll_req_2.data = handle;

  return 0;
}


static int uv__poll_set(uv_poll_t* handle, int events, uv_poll_cb cb) {
  int submitted_events;

  assert(handle->type == UV_POLL);
  assert(!(handle->flags & UV_HANDLE_CLOSING));
  assert((events & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT |
                     UV_PRIORITIZED)) == 0);

  handle->events = events;
  handle->poll_cb = cb;

  if (handle->events == 0) {
    uv__handle_stop(handle);
    return 0;
  }

  uv__handle_start(handle);
  submitted_events = handle->submitted_events_1 | handle->submitted_events_2;

  if (handle->events & ~submitted_events) {
    if (handle->flags & UV_HANDLE_POLL_SLOW) {
      uv__slow_poll_submit_poll_req(handle->loop, handle);
    } else {
      uv__fast_poll_submit_poll_req(handle->loop, handle);
    }
  }

  return 0;
}


int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb) {
  return uv__poll_set(handle, events, cb);
}


int uv_poll_stop(uv_poll_t* handle) {
  return uv__poll_set(handle, 0, handle->poll_cb);
}


void uv__process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) {
  if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
    uv__fast_poll_process_poll_req(loop, handle, req);
  } else {
    uv__slow_poll_process_poll_req(loop, handle, req);
  }
}


int uv__poll_close(uv_loop_t* loop, uv_poll_t* handle) {
  AFD_POLL_INFO afd_poll_info;
  DWORD error;
  int result;

  handle->events = 0;
  uv__handle_closing(handle);

  if (handle->submitted_events_1 == 0 &&
      handle->submitted_events_2 == 0) {
    uv__want_endgame(loop, (uv_handle_t*) handle);
    return 0;
  }

  if (handle->flags & UV_HANDLE_POLL_SLOW)
    return 0;

  /* Cancel outstanding poll requests by executing another, exclusive poll
   * request that forces the outstanding ones to return. */
  afd_poll_info.Exclusive = TRUE;
  afd_poll_info.NumberOfHandles = 1;
  afd_poll_info.Timeout.QuadPart = INT64_MAX;
  afd_poll_info.Handles[0].Handle = (HANDLE) handle->socket;
  afd_poll_info.Handles[0].Status = 0;
  afd_poll_info.Handles[0].Events = AFD_POLL_ALL;

  result = uv__msafd_poll(handle->socket,
                          &afd_poll_info,
                          uv__get_afd_poll_info_dummy(),
                          uv__get_overlapped_dummy());

  if (result == SOCKET_ERROR) {
    error = WSAGetLastError();
    if (error != WSA_IO_PENDING)
      return uv_translate_sys_error(error);
  }

  return 0;
}


void uv__poll_endgame(uv_loop_t* loop, uv_poll_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));

  assert(handle->submitted_events_1 == 0);
  assert(handle->submitted_events_2 == 0);

  uv__handle_close(handle);
}
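

/* Illustrative only, not part of libuv: a minimal sketch of how an
 * application would drive the uv_poll API implemented above. The names
 * example_poll_cb/example_watch and the socket argument are hypothetical;
 * error handling is abbreviated. Guarded by #if 0 so it has no effect on
 * compilation of this file. */
#if 0
static void example_poll_cb(uv_poll_t* watcher, int status, int events) {
  if (status < 0)
    return;  /* The watcher already stopped itself before reporting an error. */
  if (events & UV_READABLE) {
    /* Drain the socket with recv() until it returns WSAEWOULDBLOCK; the
     * next callback fires when more data arrives. */
  }
}

static int example_watch(uv_loop_t* loop, uv_os_sock_t sock) {
  static uv_poll_t watcher;
  int err;

  err = uv_poll_init_socket(loop, &watcher, sock);
  if (err != 0)
    return err;
  return uv_poll_start(&watcher, UV_READABLE | UV_DISCONNECT,
                       example_poll_cb);
}
#endif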