/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

#if defined(LWS_WITH_CLIENT)
static int
lws_close_trans_q_leader(struct lws_dll2 *d, void *user)
{
	struct lws *w = lws_container_of(d, struct lws, dll2_cli_txn_queue);

	__lws_close_free_wsi(w, (enum lws_close_status)-1,
			     "trans q leader closing");

	return 0;
}
#endif

void
__lws_reset_wsi(struct lws *wsi)
{
	if (!wsi)
		return;

#if defined(LWS_WITH_CLIENT)

	lws_free_set_NULL(wsi->cli_hostname_copy);

#if defined(LWS_WITH_CONMON)

	if (wsi->conmon.dns_results_copy) {
		lws_conmon_addrinfo_destroy(wsi->conmon.dns_results_copy);
		wsi->conmon.dns_results_copy = NULL;
	}

	wsi->conmon.ciu_dns =
		wsi->conmon.ciu_sockconn =
		wsi->conmon.ciu_tls =
		wsi->conmon.ciu_txn_resp = 0;
#endif

	/*
	 * if we have wsi in our transaction queue and we are closing, we
	 * must go through and close all those first
	 */
	if (wsi->a.vhost) {

		/* we are no longer an active client connection that can piggyback */
		lws_dll2_remove(&wsi->dll_cli_active_conns);

		lws_dll2_foreach_safe(&wsi->dll2_cli_txn_queue_owner, NULL,
				      lws_close_trans_q_leader);

		/*
		 * !!! If we are closing, but we have pending pipelined
		 * transaction results we already sent headers for, that's
		 * going to destroy sync for HTTP/1 and leave the H2 stream
		 * with no live swsi.
		 *
		 * However this is normal if we are being closed because the
		 * transaction queue leader is closing.
		 */
		lws_dll2_remove(&wsi->dll2_cli_txn_queue);
	}
#endif

	if (wsi->a.vhost) {
		lws_vhost_lock(wsi->a.vhost);
		lws_dll2_remove(&wsi->vh_awaiting_socket);
		lws_vhost_unlock(wsi->a.vhost);
	}

	/*
	 * Protocol user data may be allocated either internally by lws
	 * or specified by the user. We should only free what we allocated.
	 */
	if (wsi->a.protocol && wsi->a.protocol->per_session_data_size &&
	    wsi->user_space && !wsi->user_space_externally_allocated) {
		/* confirm no sul left scheduled in user data itself */
		lws_sul_debug_zombies(wsi->a.context, wsi->user_space,
			      wsi->a.protocol->per_session_data_size, __func__);
		lws_free_set_NULL(wsi->user_space);
	}

	/*
	 * Don't let buflist content or state from the wsi's previous life
	 * carry over to the new life
	 */

	lws_buflist_destroy_all_segments(&wsi->buflist);
	lws_dll2_remove(&wsi->dll_buflist);
	lws_buflist_destroy_all_segments(&wsi->buflist_out);
#if defined(LWS_WITH_UDP)
	if (wsi->udp) {
		/* confirm no sul left scheduled in wsi->udp itself */
		lws_sul_debug_zombies(wsi->a.context, wsi->udp,
				      sizeof(*wsi->udp), "close udp wsi");
		lws_free_set_NULL(wsi->udp);
	}
#endif
	wsi->retry = 0;

#if defined(LWS_WITH_CLIENT)
	lws_dll2_remove(&wsi->dll2_cli_txn_queue);
	lws_dll2_remove(&wsi->dll_cli_active_conns);
	if (wsi->cli_hostname_copy)
		lws_free_set_NULL(wsi->cli_hostname_copy);
#endif

#if defined(LWS_WITH_SYS_ASYNC_DNS)
	lws_async_dns_cancel(wsi);
#endif

#if defined(LWS_WITH_HTTP_PROXY)
	if (wsi->http.buflist_post_body)
		lws_buflist_destroy_all_segments(&wsi->http.buflist_post_body);
#endif

#if defined(LWS_WITH_SERVER)
	lws_dll2_remove(&wsi->listen_list);
#endif

#if defined(LWS_WITH_CLIENT)
	if (wsi->a.vhost)
		lws_dll2_remove(&wsi->dll_cli_active_conns);
#endif

	__lws_same_vh_protocol_remove(wsi);
#if defined(LWS_WITH_CLIENT)
	//lws_free_set_NULL(wsi->stash);
	lws_free_set_NULL(wsi->cli_hostname_copy);
#endif

#if defined(LWS_WITH_PEER_LIMITS)
	lws_peer_track_wsi_close(wsi->a.context, wsi->peer);
	wsi->peer = NULL;
#endif

	/* since we will destroy the wsi, make absolutely sure now */

#if defined(LWS_WITH_OPENSSL)
	__lws_ssl_remove_wsi_from_buffered_list(wsi);
#endif
	__lws_wsi_remove_from_sul(wsi);

	if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_destroy_role))
		lws_rops_func_fidx(wsi->role_ops,
				   LWS_ROPS_destroy_role).destroy_role(wsi);

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	__lws_header_table_detach(wsi, 0);
#endif

#if defined(LWS_ROLE_H2)
	/*
	 * Let's try to clean out the h2-ness of the wsi
	 */

	memset(&wsi->h2, 0, sizeof(wsi->h2));

	wsi->hdr_parsing_completed = wsi->mux_substream =
	wsi->upgraded_to_http2 = wsi->mux_stream_immortal =
	wsi->h2_acked_settings = wsi->seen_nonpseudoheader =
	wsi->socket_is_permanently_unusable = wsi->favoured_pollin =
	wsi->already_did_cce = wsi->told_user_closed =
	wsi->waiting_to_send_close_frame = wsi->close_needs_ack =
	wsi->parent_pending_cb_on_writable = wsi->seen_zero_length_recv =
	wsi->close_when_buffered_out_drained = wsi->could_have_pending = 0;
#endif

#if defined(LWS_WITH_CLIENT)
	wsi->do_ws = wsi->chunked = wsi->client_rx_avail =
	wsi->client_http_body_pending = wsi->transaction_from_pipeline_queue =
	wsi->keepalive_active = wsi->keepalive_rejected =
	wsi->redirected_to_get = wsi->client_pipeline = wsi->client_h2_alpn =
	wsi->client_mux_substream = wsi->client_mux_migrated =
	wsi->tls_session_reused = wsi->perf_done = 0;

	wsi->immortal_substream_count = 0;
#endif
}
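
/*
 * Overview of the teardown path implemented in this file:
 *
 *   lws_close_free_wsi()            takes the context + pt locks, then calls
 *   -> __lws_close_free_wsi()       which flushes / politely closes or jumps
 *                                   to just_kill_connection; unless the event
 *                                   loop defers via wsi_logical_close it ends
 *                                   with
 *   -> __lws_close_free_wsi_final() which closes the socket, or picks up a
 *                                   client redirect and reuses the wsi, then
 *                                   issues LWS_CALLBACK_WSI_DESTROY and calls
 *   -> __lws_free_wsi()             which runs __lws_reset_wsi() above,
 *                                   unbinds from the vhost and frees the wsi.
 */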

/* req cx lock */

void
__lws_free_wsi(struct lws *wsi)
{
	struct lws_vhost *vh;

	if (!wsi)
		return;

	lws_context_assert_lock_held(wsi->a.context);

#if defined(LWS_WITH_SECURE_STREAMS)
	if (wsi->for_ss) {

#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
		if (wsi->client_bound_sspc) {
			lws_sspc_handle_t *h = (lws_sspc_handle_t *)
						wsi->a.opaque_user_data;
			if (h) {
				h->cwsi = NULL;
				wsi->a.opaque_user_data = NULL;
			}
		} else
#endif
		{
			/*
			 * Make certain it is disconnected from the ss by now
			 */
			lws_ss_handle_t *h = (lws_ss_handle_t *)
						wsi->a.opaque_user_data;

			if (h) {
				h->wsi = NULL;
				wsi->a.opaque_user_data = NULL;
			}
		}
	}
#endif

	vh = wsi->a.vhost;

	__lws_reset_wsi(wsi);
	__lws_wsi_remove_from_sul(wsi);

	if (vh)
		/* this may destroy vh */
		__lws_vhost_unbind_wsi(wsi); /* req cx + vh lock */

#if defined(LWS_WITH_CLIENT)
	if (wsi->stash)
		lws_free_set_NULL(wsi->stash);
#endif

	if (wsi->a.context->event_loop_ops->destroy_wsi)
		wsi->a.context->event_loop_ops->destroy_wsi(wsi);

	lwsl_wsi_debug(wsi, "tsi fds count %d\n",
		       wsi->a.context->pt[(int)wsi->tsi].fds_count);

	/* confirm no sul left scheduled in wsi itself */
	lws_sul_debug_zombies(wsi->a.context, wsi, sizeof(*wsi), __func__);

	__lws_lc_untag(wsi->a.context, &wsi->lc);
	lws_free(wsi);
}


void
lws_remove_child_from_any_parent(struct lws *wsi)
{
	struct lws **pwsi;
	int seen = 0;

	if (!wsi->parent)
		return;

	/* detach ourselves from parent's child list */
	pwsi = &wsi->parent->child_list;
	while (*pwsi) {
		if (*pwsi == wsi) {
			lwsl_wsi_info(wsi, "detach from parent %s",
				      lws_wsi_tag(wsi->parent));

			if (wsi->parent->a.protocol)
				wsi->parent->a.protocol->callback(wsi,
						LWS_CALLBACK_CHILD_CLOSING,
						wsi->parent->user_space, wsi, 0);

			*pwsi = wsi->sibling_list;
			seen = 1;
			break;
		}
		pwsi = &(*pwsi)->sibling_list;
	}
	if (!seen)
		lwsl_wsi_err(wsi, "failed to detach from parent");

	wsi->parent = NULL;
}

#if defined(LWS_WITH_CLIENT)
void
lws_inform_client_conn_fail(struct lws *wsi, void *arg, size_t len)
{
	lws_addrinfo_clean(wsi);

	if (wsi->already_did_cce)
		return;

	wsi->already_did_cce = 1;

	if (!wsi->a.protocol)
		return;

	if (!wsi->client_suppress_CONNECTION_ERROR)
		wsi->a.protocol->callback(wsi,
					  LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
					  wsi->user_space, arg, len);
}
#endif

void
lws_addrinfo_clean(struct lws *wsi)
{
#if defined(LWS_WITH_CLIENT)
	struct lws_dll2 *d = lws_dll2_get_head(&wsi->dns_sorted_list), *d1;

	while (d) {
		lws_dns_sort_t *r = lws_container_of(d, lws_dns_sort_t, list);

		d1 = d->next;
		lws_dll2_remove(d);
		lws_free(r);

		d = d1;
	}
#endif
}

/* requires cx and pt lock */

void
__lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason,
		     const char *caller)
{
	struct lws_context_per_thread *pt;
	const struct lws_protocols *pro;
	struct lws_context *context;
	struct lws *wsi1, *wsi2;
	int n, ccb;

	if (!wsi)
		return;

	lwsl_wsi_info(wsi, "caller: %s", caller);

	lws_access_log(wsi);

	if (!lws_dll2_is_detached(&wsi->dll_buflist))
		lwsl_wsi_info(wsi, "going down with stuff in buflist");

	context = wsi->a.context;
	pt = &context->pt[(int)wsi->tsi];

	if (pt->pipe_wsi == wsi)
		pt->pipe_wsi = NULL;

#if defined(LWS_WITH_SYS_METRICS) && \
	(defined(LWS_WITH_CLIENT) || defined(LWS_WITH_SERVER))
	/* wsi level: only reports if dangling caliper */
	if (wsi->cal_conn.mt && wsi->cal_conn.us_start) {
		if ((lws_metrics_priv_to_pub(wsi->cal_conn.mt)->flags) &
						      LWSMTFL_REPORT_HIST) {
			lws_metrics_caliper_report_hist(wsi->cal_conn,
							(struct lws *)NULL);
		} else {
			lws_metrics_caliper_report(wsi->cal_conn, METRES_NOGO);
			lws_metrics_caliper_done(wsi->cal_conn);
		}
	} else
		lws_metrics_caliper_done(wsi->cal_conn);
#endif

#if defined(LWS_WITH_SYS_ASYNC_DNS)
	if (wsi == context->async_dns.wsi)
		context->async_dns.wsi = NULL;
#endif

	lws_pt_assert_lock_held(pt);

#if defined(LWS_WITH_CLIENT)

	lws_free_set_NULL(wsi->cli_hostname_copy);
	wsi->client_mux_substream_was = wsi->client_mux_substream;

	lws_addrinfo_clean(wsi);
#endif

#if defined(LWS_WITH_HTTP2)
	if (wsi->mux_stream_immortal)
		lws_http_close_immortal(wsi);
#endif

	/* if we have children, close them first */
	if (wsi->child_list) {
		wsi2 = wsi->child_list;
		while (wsi2) {
			wsi1 = wsi2->sibling_list;
//			wsi2->parent = NULL;
			/* stop it doing shutdown processing */
			wsi2->socket_is_permanently_unusable = 1;
			__lws_close_free_wsi(wsi2, reason,
					     "general child recurse");
			wsi2 = wsi1;
		}
		wsi->child_list = NULL;
	}

#if defined(LWS_ROLE_RAW_FILE)
	if (wsi->role_ops == &role_ops_raw_file) {
		lws_remove_child_from_any_parent(wsi);
		__remove_wsi_socket_from_fds(wsi);
		if (wsi->a.protocol)
			wsi->a.protocol->callback(wsi, wsi->role_ops->close_cb[0],
						  wsi->user_space, NULL, 0);
		goto async_close;
	}
#endif

	wsi->wsistate_pre_close = wsi->wsistate;

#ifdef LWS_WITH_CGI
	if (wsi->role_ops == &role_ops_cgi) {

		// lwsl_debug("%s: closing stdwsi index %d\n", __func__, (int)wsi->lsp_channel);

		/* we are not a network connection, but a handler for CGI io */
		if (wsi->parent && wsi->parent->http.cgi) {

			/*
			 * We need to keep the logical cgi around so we can
			 * drain it
			 */

//			if (wsi->parent->child_list == wsi && !wsi->sibling_list)
//				lws_cgi_remove_and_kill(wsi->parent);

			/* end the binding between us and network connection */
			if (wsi->parent->http.cgi && wsi->parent->http.cgi->lsp)
				wsi->parent->http.cgi->lsp->stdwsi[(int)wsi->lsp_channel] =
									NULL;
		}
		wsi->socket_is_permanently_unusable = 1;

		goto just_kill_connection;
	}

	if (wsi->http.cgi)
		lws_cgi_remove_and_kill(wsi);
#endif

#if defined(LWS_WITH_CLIENT)
	if (!wsi->close_is_redirect)
		lws_free_set_NULL(wsi->stash);
#endif

	if (wsi->role_ops == &role_ops_raw_skt) {
		wsi->socket_is_permanently_unusable = 1;
		goto just_kill_connection;
	}
#if defined(LWS_WITH_FILE_OPS) && (defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2))
	if (lwsi_role_http(wsi) && lwsi_role_server(wsi) &&
	    wsi->http.fop_fd != NULL)
		lws_vfs_file_close(&wsi->http.fop_fd);
#endif

	if (lwsi_state(wsi) == LRS_DEAD_SOCKET)
		return;

	if (wsi->socket_is_permanently_unusable ||
	    reason == LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY ||
	    lwsi_state(wsi) == LRS_SHUTDOWN)
		goto just_kill_connection;

	switch (lwsi_state_PRE_CLOSE(wsi)) {
	case LRS_DEAD_SOCKET:
		return;

	/* we tried the polite way... */
	case LRS_WAITING_TO_SEND_CLOSE:
	case LRS_AWAITING_CLOSE_ACK:
	case LRS_RETURNED_CLOSE:
		goto just_kill_connection;

	case LRS_FLUSHING_BEFORE_CLOSE:
		if (lws_has_buffered_out(wsi)
#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
		    || wsi->http.comp_ctx.buflist_comp ||
		       wsi->http.comp_ctx.may_have_more
#endif
		   ) {
			lws_callback_on_writable(wsi);
			return;
		}
		lwsl_wsi_info(wsi, " end LRS_FLUSHING_BEFORE_CLOSE");
		goto just_kill_connection;
	default:
		if (lws_has_buffered_out(wsi)
#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
		    || wsi->http.comp_ctx.buflist_comp ||
		       wsi->http.comp_ctx.may_have_more
#endif
		   ) {
			lwsl_wsi_info(wsi, "LRS_FLUSHING_BEFORE_CLOSE");
			lwsi_set_state(wsi, LRS_FLUSHING_BEFORE_CLOSE);
			__lws_set_timeout(wsi,
				PENDING_FLUSH_STORED_SEND_BEFORE_CLOSE, 5);
			return;
		}
		break;
	}

	if (lwsi_state(wsi) == LRS_WAITING_CONNECT ||
	    lwsi_state(wsi) == LRS_WAITING_DNS ||
	    lwsi_state(wsi) == LRS_H1C_ISSUE_HANDSHAKE)
		goto just_kill_connection;

	if (!wsi->told_user_closed && wsi->user_space && wsi->a.protocol &&
	    wsi->protocol_bind_balance) {
		wsi->a.protocol->callback(wsi,
					  wsi->role_ops->protocol_unbind_cb[
						   !!lwsi_role_server(wsi)],
					  wsi->user_space, (void *)__func__, 0);
		wsi->protocol_bind_balance = 0;
	}

	/*
	 * Signal we are closing; lws_write will add any necessary
	 * version-specific stuff. If the write fails, no worries, we are
	 * closing anyway. If we didn't initiate this close, then our state
	 * has been changed to LRS_RETURNED_CLOSE and we will skip this.
	 *
	 * Likewise if it's a second call to close this connection after we
	 * sent the close indication to the peer already, we are in state
	 * LRS_AWAITING_CLOSE_ACK and will skip doing this a second time.
	 */

	if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_close_via_role_protocol) &&
	    lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_close_via_role_protocol).
				close_via_role_protocol(wsi, reason)) {
		lwsl_wsi_info(wsi, "close_via_role took over (sockfd %d)",
			      wsi->desc.sockfd);
		return;
	}

just_kill_connection:

	lwsl_wsi_debug(wsi, "real just_kill_connection A: (sockfd %d)",
		       wsi->desc.sockfd);

#if defined(LWS_WITH_THREADPOOL) && defined(LWS_HAVE_PTHREAD_H)
	lws_threadpool_wsi_closing(wsi);
#endif

#if defined(LWS_WITH_FILE_OPS) && (defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2))
	if (lwsi_role_http(wsi) && lwsi_role_server(wsi) &&
	    wsi->http.fop_fd != NULL)
		lws_vfs_file_close(&wsi->http.fop_fd);
#endif

	lws_sul_cancel(&wsi->sul_connect_timeout);
#if defined(LWS_WITH_SYS_ASYNC_DNS)
	lws_async_dns_cancel(wsi);
#endif

#if defined(LWS_WITH_HTTP_PROXY)
	if (wsi->http.buflist_post_body)
		lws_buflist_destroy_all_segments(&wsi->http.buflist_post_body);
#endif
#if defined(LWS_WITH_UDP)
	if (wsi->udp) {
		/* confirm no sul left scheduled in wsi->udp itself */
		lws_sul_debug_zombies(wsi->a.context, wsi->udp,
				      sizeof(*wsi->udp), "close udp wsi");

		lws_free_set_NULL(wsi->udp);
	}
#endif

	if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_close_kill_connection))
		lws_rops_func_fidx(wsi->role_ops,
				   LWS_ROPS_close_kill_connection).
					close_kill_connection(wsi, reason);

	n = 0;

	if (!wsi->told_user_closed && wsi->user_space &&
	    wsi->protocol_bind_balance && wsi->a.protocol) {
		lwsl_debug("%s: %s: DROP_PROTOCOL %s\n", __func__,
			   lws_wsi_tag(wsi),
			   wsi->a.protocol ? wsi->a.protocol->name : "NULL");
		if (wsi->a.protocol)
			wsi->a.protocol->callback(wsi,
					  wsi->role_ops->protocol_unbind_cb[
						   !!lwsi_role_server(wsi)],
					  wsi->user_space, (void *)__func__, 0);
		wsi->protocol_bind_balance = 0;
	}

#if defined(LWS_WITH_CLIENT)
	if ((
#if defined(LWS_ROLE_WS)
	     /*
	      * If our goal is a ws upgrade, effectively we did not reach
	      * ESTABLISHED if we did not get the upgrade server reply
	      */
	     (lwsi_state(wsi) == LRS_WAITING_SERVER_REPLY &&
	      wsi->role_ops == &role_ops_ws) ||
#endif
	     lwsi_state(wsi) == LRS_WAITING_DNS ||
	     lwsi_state(wsi) == LRS_WAITING_CONNECT) &&
	    !wsi->already_did_cce && wsi->a.protocol &&
	    !wsi->close_is_redirect) {
		static const char _reason[] = "closed before established";

		lwsl_wsi_debug(wsi, "closing in unestablished state 0x%x",
			       lwsi_state(wsi));
		wsi->socket_is_permanently_unusable = 1;

		lws_inform_client_conn_fail(wsi,
					    (void *)_reason, sizeof(_reason));
	}
#endif

	/*
	 * Testing with ab shows that we have to stage the socket close when
	 * the system is under stress... shutdown any further TX, change the
	 * state to one that won't emit anything more, and wait with a timeout
	 * for the POLLIN to show a zero-size rx before coming back and doing
	 * the actual close.
	 */
	if (wsi->role_ops != &role_ops_raw_skt && !lwsi_role_client(wsi) &&
	    lwsi_state(wsi) != LRS_SHUTDOWN &&
	    lwsi_state(wsi) != LRS_UNCONNECTED &&
	    reason != LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY &&
	    !wsi->socket_is_permanently_unusable) {

#if defined(LWS_WITH_TLS)
		if (lws_is_ssl(wsi) && wsi->tls.ssl) {
			n = 0;
			switch (__lws_tls_shutdown(wsi)) {
			case LWS_SSL_CAPABLE_DONE:
			case LWS_SSL_CAPABLE_ERROR:
			case LWS_SSL_CAPABLE_MORE_SERVICE_READ:
			case LWS_SSL_CAPABLE_MORE_SERVICE_WRITE:
			case LWS_SSL_CAPABLE_MORE_SERVICE:
				break;
			}
		} else
#endif
		{
			lwsl_info("%s: shutdown conn: %s (sk %d, state 0x%x)\n",
				  __func__, lws_wsi_tag(wsi),
				  (int)(lws_intptr_t)wsi->desc.sockfd,
				  lwsi_state(wsi));
			if (!wsi->socket_is_permanently_unusable &&
			    lws_socket_is_valid(wsi->desc.sockfd)) {
				wsi->socket_is_permanently_unusable = 1;
				n = shutdown(wsi->desc.sockfd, SHUT_WR);
			}
		}
		if (n)
			lwsl_wsi_debug(wsi, "closing: shutdown (state 0x%x) ret %d",
				       lwsi_state(wsi), LWS_ERRNO);

		/*
		 * This causes problems on WINCE / ESP32 with disconnection
		 * when the events are half-closing the connection
		 */
#if !defined(_WIN32_WCE) && !defined(LWS_PLAT_FREERTOS)
		/* libuv: no event available to guarantee completion */
		if (!wsi->socket_is_permanently_unusable &&
#if defined(LWS_WITH_CLIENT)
		    !wsi->close_is_redirect &&
#endif
		    lws_socket_is_valid(wsi->desc.sockfd) &&
		    lwsi_state(wsi) != LRS_SHUTDOWN &&
		    (context->event_loop_ops->flags & LELOF_ISPOLL)) {
			__lws_change_pollfd(wsi, LWS_POLLOUT, LWS_POLLIN);
			lwsi_set_state(wsi, LRS_SHUTDOWN);
			__lws_set_timeout(wsi, PENDING_TIMEOUT_SHUTDOWN_FLUSH,
					  (int)context->timeout_secs);

			return;
		}
#endif
	}

	lwsl_wsi_info(wsi, "real just_kill_connection: sockfd %d\n",
		      wsi->desc.sockfd);

#ifdef LWS_WITH_HUBBUB
	if (wsi->http.rw) {
		lws_rewrite_destroy(wsi->http.rw);
		wsi->http.rw = NULL;
	}
#endif

	if (wsi->http.pending_return_headers)
		lws_free_set_NULL(wsi->http.pending_return_headers);

	/*
	 * We won't be servicing or receiving anything further from this guy;
	 * delete the socket from the internal poll list if still present
	 */
	__lws_ssl_remove_wsi_from_buffered_list(wsi);
	__lws_wsi_remove_from_sul(wsi);

	//if (wsi->told_event_loop_closed) // cgi std close case (dummy-callback)
	//	return;

	/* checking the return is redundant since we close anyway */
	__remove_wsi_socket_from_fds(wsi);

	lwsi_set_state(wsi, LRS_DEAD_SOCKET);
	lws_buflist_destroy_all_segments(&wsi->buflist);
	lws_dll2_remove(&wsi->dll_buflist);

	if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_close_role))
		lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_close_role).
							close_role(pt, wsi);

	/* tell the user it's all over for this guy */

	ccb = 0;
	if ((lwsi_state_est_PRE_CLOSE(wsi) ||
	     /* raw skt adopted but didn't complete tls hs should CLOSE */
	     (wsi->role_ops == &role_ops_raw_skt && !lwsi_role_client(wsi)) ||
	     lwsi_state_PRE_CLOSE(wsi) == LRS_WAITING_SERVER_REPLY) &&
	    !wsi->told_user_closed &&
	    wsi->role_ops->close_cb[lwsi_role_server(wsi)]) {
		if (!wsi->upgraded_to_http2 || !lwsi_role_client(wsi))
			ccb = 1;
		/*
		 * The network wsi for a client h2 connection shouldn't
		 * call back for its role: the child stream connections
		 * own the role. Otherwise h2 will call back closed
		 * one too many times, as the children do it and then
		 * the closing network stream does too.
		 */
	}

	if (!wsi->told_user_closed &&
	    !lws_dll2_is_detached(&wsi->vh_awaiting_socket))
		/*
		 * He's a guy who got started with dns, but failed or was
		 * caught by a shutdown before he got the result.  We have
		 * to issue him a close cb
		 */
		ccb = 1;

	lwsl_wsi_info(wsi, "cce=%d", ccb);

	pro = wsi->a.protocol;

	if (wsi->already_did_cce)
		/*
		 * If we handled this by CLIENT_CONNECTION_ERROR, it's
		 * mutually exclusive with CLOSE
		 */
		ccb = 0;

#if defined(LWS_WITH_CLIENT)
	if (!wsi->close_is_redirect && !ccb &&
	    (lwsi_state_PRE_CLOSE(wsi) & LWSIFS_NOT_EST) &&
	    lwsi_role_client(wsi)) {
		lws_inform_client_conn_fail(wsi, "Closed before conn", 18);
	}
#endif
	if (ccb
#if defined(LWS_WITH_CLIENT)
	    && !wsi->close_is_redirect
#endif
	   ) {

		if (!wsi->a.protocol && wsi->a.vhost && wsi->a.vhost->protocols)
			pro = &wsi->a.vhost->protocols[0];

		if (pro)
			pro->callback(wsi,
				      wsi->role_ops->close_cb[lwsi_role_server(wsi)],
				      wsi->user_space, NULL, 0);
		wsi->told_user_closed = 1;
	}

#if defined(LWS_ROLE_RAW_FILE)
async_close:
#endif

#if defined(LWS_WITH_SECURE_STREAMS)
	if (wsi->for_ss) {
		lwsl_wsi_debug(wsi, "for_ss");
		/*
		 * We were adopted for a particular ss, but, eg, we may not
		 * have succeeded with the connection...
		 * we are closing, which is good, but we have to invalidate
		 * any pointer the related ss handle may be holding on us
		 */
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)

		if (wsi->client_proxy_onward) {
			/*
			 * We are an onward proxied wsi at the proxy; opaque
			 * is the proxying "conn", we must remove its pointer
			 * to us since we are being destroyed
			 */
			lws_proxy_clean_conn_ss(wsi);
		} else

		if (wsi->client_bound_sspc) {
			lws_sspc_handle_t *h = (lws_sspc_handle_t *)
						wsi->a.opaque_user_data;

			if (h) { // && (h->info.flags & LWSSSINFLAGS_ACCEPTED)) {

#if defined(LWS_WITH_SYS_METRICS)
				/*
				 * If there is any hanging caliper measurement,
				 * dump it, and free any tags
				 */
				lws_metrics_caliper_report_hist(h->cal_txn,
							(struct lws *)NULL);
#endif

				h->cwsi = NULL;
				//wsi->a.opaque_user_data = NULL;
			}
		} else
#endif
		{
			lws_ss_handle_t *h = (lws_ss_handle_t *)
						wsi->a.opaque_user_data;

			if (h) { // && (h->info.flags & LWSSSINFLAGS_ACCEPTED)) {

				/*
				 * ss level: only reports if dangling caliper
				 * not already reported
				 */
				lws_metrics_caliper_report_hist(h->cal_txn, wsi);

				h->wsi = NULL;
				wsi->a.opaque_user_data = NULL;

				if (h->ss_dangling_connected &&
				    lws_ss_event_helper(h, LWSSSCS_DISCONNECTED) ==
						       LWSSSSRET_DESTROY_ME) {

					lws_ss_destroy(&h);
				}
			}
		}
	}
#endif


	lws_remove_child_from_any_parent(wsi);
	wsi->socket_is_permanently_unusable = 1;

	if (wsi->a.context->event_loop_ops->wsi_logical_close)
		if (wsi->a.context->event_loop_ops->wsi_logical_close(wsi))
			return;

	__lws_close_free_wsi_final(wsi);
}


/* cx + vh lock */

void
__lws_close_free_wsi_final(struct lws *wsi)
{
	int n;

	if (!wsi->shadow &&
	    lws_socket_is_valid(wsi->desc.sockfd) && !lws_ssl_close(wsi)) {
		lwsl_wsi_debug(wsi, "fd %d", wsi->desc.sockfd);
		n = compatible_close(wsi->desc.sockfd);
		if (n)
			lwsl_wsi_debug(wsi, "closing: close ret %d", LWS_ERRNO);

		__remove_wsi_socket_from_fds(wsi);
		if (lws_socket_is_valid(wsi->desc.sockfd))
			delete_from_fd(wsi->a.context, wsi->desc.sockfd);

#if !defined(LWS_PLAT_FREERTOS) && !defined(WIN32) && !defined(LWS_PLAT_OPTEE)
		delete_from_fdwsi(wsi->a.context, wsi);
#endif

		sanity_assert_no_sockfd_traces(wsi->a.context, wsi->desc.sockfd);
	}

	/* ... if we're closing the cancel pipe, account for it */

	{
		struct lws_context_per_thread *pt =
				&wsi->a.context->pt[(int)wsi->tsi];

		if (pt->pipe_wsi == wsi)
			pt->pipe_wsi = NULL;
		if (pt->dummy_pipe_fds[0] == wsi->desc.sockfd)
			pt->dummy_pipe_fds[0] = LWS_SOCK_INVALID;
	}

	wsi->desc.sockfd = LWS_SOCK_INVALID;

#if defined(LWS_WITH_CLIENT)
	lws_free_set_NULL(wsi->cli_hostname_copy);
	if (wsi->close_is_redirect) {

		wsi->close_is_redirect = 0;

		lwsl_wsi_info(wsi, "picking up redirection");

		lws_role_transition(wsi, LWSIFR_CLIENT, LRS_UNCONNECTED,
				    &role_ops_h1);

#if defined(LWS_WITH_HTTP2)
		if (wsi->client_mux_substream_was)
			wsi->h2.END_STREAM = wsi->h2.END_HEADERS = 0;
#endif
#if defined(LWS_ROLE_H2) || defined(LWS_ROLE_MQTT)
		if (wsi->mux.parent_wsi) {
			lws_wsi_mux_sibling_disconnect(wsi);
			wsi->mux.parent_wsi = NULL;
		}
#endif

#if defined(LWS_WITH_TLS)
		memset(&wsi->tls, 0, sizeof(wsi->tls));
#endif

		// wsi->a.protocol = NULL;
		if (wsi->a.protocol)
			lws_bind_protocol(wsi, wsi->a.protocol, "client_reset");
		wsi->pending_timeout = NO_PENDING_TIMEOUT;
		wsi->hdr_parsing_completed = 0;

#if defined(LWS_WITH_TLS)
		if (wsi->stash->cis[CIS_ALPN])
			lws_strncpy(wsi->alpn, wsi->stash->cis[CIS_ALPN],
				    sizeof(wsi->alpn));
#endif

		if (lws_header_table_attach(wsi, 0)) {
			lwsl_wsi_err(wsi, "failed to get ah");
			return;
		}
//	}
		//_lws_header_table_reset(wsi->http.ah);

#if defined(LWS_WITH_TLS)
		wsi->tls.use_ssl = (unsigned int)wsi->flags;
#endif

#if defined(LWS_WITH_TLS_JIT_TRUST)
		if (wsi->stash && wsi->stash->cis[CIS_ADDRESS]) {
			struct lws_vhost *vh = NULL;
			lws_tls_jit_trust_vhost_bind(wsi->a.context,
						     wsi->stash->cis[CIS_ADDRESS],
						     &vh);
			if (vh) {
				if (!vh->count_bound_wsi &&
				    vh->grace_after_unref) {
					lwsl_wsi_info(wsi, "%s in use\n",
						      vh->lc.gutag);
					lws_sul_cancel(&vh->sul_unref);
				}
				vh->count_bound_wsi++;
				wsi->a.vhost = vh;
			}
		}
#endif

		return;
	}
#endif

	/* outermost destroy notification for wsi (user_space still intact) */
	if (wsi->a.vhost)
		wsi->a.vhost->protocols[0].callback(wsi,
						    LWS_CALLBACK_WSI_DESTROY,
						    wsi->user_space, NULL, 0);

#ifdef LWS_WITH_CGI
	if (wsi->http.cgi) {
		lws_spawn_piped_destroy(&wsi->http.cgi->lsp);
		lws_sul_cancel(&wsi->http.cgi->sul_grace);
		lws_free_set_NULL(wsi->http.cgi);
	}
#endif

#if defined(LWS_WITH_SYS_FAULT_INJECTION)
	lws_fi_destroy(&wsi->fic);
#endif

	__lws_wsi_remove_from_sul(wsi);
	sanity_assert_no_wsi_traces(wsi->a.context, wsi);
	__lws_free_wsi(wsi);
}


void
lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason,
		   const char *caller)
{
	struct lws_context *cx = wsi->a.context;
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	lws_context_lock(cx, __func__);

	lws_pt_lock(pt, __func__);
	/* may destroy vhost, cannot hold vhost lock outside it */
	__lws_close_free_wsi(wsi, reason, caller);
	lws_pt_unlock(pt);

	lws_context_unlock(cx);
}
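
/*
 * Illustrative sketch (guarded out with #if 0, not part of the library
 * build): how application code normally reaches the teardown path above.
 * A protocol callback does not call lws_close_free_wsi() directly; it
 * returns nonzero (optionally after lws_close_reason(), for ws) and the
 * service loop then runs the close path implemented in this file.  Only
 * public lws API is used below; "example_callback" itself is a
 * hypothetical name.
 */
#if 0
#include <libwebsockets.h>
#include <string.h>

static int
example_callback(struct lws *wsi, enum lws_callback_reasons reason,
		 void *user, void *in, size_t len)
{
	switch (reason) {
	case LWS_CALLBACK_RECEIVE:
		/* close politely when the peer sends "bye" */
		if (len == 3 && !memcmp(in, "bye", 3)) {
			/* queue the ws close status / payload to be sent */
			lws_close_reason(wsi, LWS_CLOSE_STATUS_NORMAL,
					 (unsigned char *)"bye", 3);
			/* returning nonzero asks lws to close this wsi */
			return -1;
		}
		break;
	default:
		break;
	}

	return 0;
}
#endif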