/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

#if defined(_DEBUG)
void
lws_service_assert_loop_thread(struct lws_context *cx, int tsi)
{
	if (!cx->event_loop_ops->foreign_thread)
		/* we can't judge it */
		return;

	if (!cx->event_loop_ops->foreign_thread(cx, tsi))
		/* OK */
		return;

	/*
	 * Lws apis are NOT THREADSAFE with the sole exception of
	 * lws_cancel_service().  If you look at the assert backtrace, you
	 * should see you're illegally calling an lws api from another thread.
	 */
	assert(0);
}
#endif
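
/*
 * Illustrative sketch (not part of the build): the only threadsafe way for
 * a foreign thread to provoke service is lws_cancel_service(); every other
 * lws api must be called from the service thread.  The helper name
 * worker_thread below is hypothetical.
 */
#if 0
#include <libwebsockets.h>
#include <pthread.h>

static void *
worker_thread(void *arg)
{
	struct lws_context *cx = (struct lws_context *)arg;

	/* WRONG: would trip lws_service_assert_loop_thread() in _DEBUG */
	/* lws_callback_on_writable(some_wsi); */

	/*
	 * RIGHT: wake the service thread; it receives
	 * LWS_CALLBACK_EVENT_WAIT_CANCELLED and can act from its own thread
	 */
	lws_cancel_service(cx);

	return NULL;
}
#endif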

int
lws_callback_as_writeable(struct lws *wsi)
{
	int n, m;

	/* select the client or server flavour of the writeable callback */
	n = wsi->role_ops->writeable_cb[lwsi_role_server(wsi)];
	m = user_callback_handle_rxflow(wsi->a.protocol->callback,
					wsi, (enum lws_callback_reasons) n,
					wsi->user_space, NULL, 0);

	return m;
}
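
/*
 * Illustrative sketch (not part of the build): what the user side of the
 * notification above typically looks like.  The protocol callback gets
 * LWS_CALLBACK_SERVER_WRITEABLE (LWS_CALLBACK_CLIENT_WRITEABLE on the client
 * side) and should do at most one lws_write() per callback.  Assumes
 * <libwebsockets.h> and <string.h>.
 */
#if 0
static int
my_protocol_cb(struct lws *wsi, enum lws_callback_reasons reason,
	       void *user, void *in, size_t len)
{
	unsigned char buf[LWS_PRE + 128];

	switch (reason) {
	case LWS_CALLBACK_SERVER_WRITEABLE:
		/* payload must sit behind LWS_PRE bytes of headroom */
		memcpy(&buf[LWS_PRE], "hello", 5);
		if (lws_write(wsi, &buf[LWS_PRE], 5, LWS_WRITE_TEXT) < 5)
			return -1; /* ask lws to close the connection */
		break;
	default:
		break;
	}

	return 0;
}
#endif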

int
lws_handle_POLLOUT_event(struct lws *wsi, struct lws_pollfd *pollfd)
{
	volatile struct lws *vwsi = (volatile struct lws *)wsi;
	int n;

	if (wsi->socket_is_permanently_unusable)
		return 0;

	vwsi->leave_pollout_active = 0;
	vwsi->handling_pollout = 1;
	/*
	 * if another thread wants POLLOUT on us, from here on while
	 * handling_pollout is set, he will only set leave_pollout_active.
	 * If we are going to disable POLLOUT, we will check that first.
	 */
	wsi->could_have_pending = 0; /* clear back-to-back write detection */

	/*
	 * The user callback is the lowest priority to get these notifications,
	 * since the other pending things cannot be reordered:
	 *
	 * Priority 1: pending truncated sends are incomplete ws fragments.
	 *	       If anything else were sent first, the protocol would be
	 *	       corrupted.
	 *
	 *	       These are post- any compression transform.
	 */

	if (lws_has_buffered_out(wsi)) {
		if (lws_issue_raw(wsi, NULL, 0) < 0) {
			lwsl_wsi_info(wsi, "signalling to close");
			goto bail_die;
		}
		/* leave POLLOUT active either way */
		goto bail_ok;
	} else
		if (lwsi_state(wsi) == LRS_FLUSHING_BEFORE_CLOSE) {
			wsi->socket_is_permanently_unusable = 1;
			goto bail_die; /* retry closing now */
		}

	/* Priority 2: pre- compression transform */

#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
	if (wsi->http.comp_ctx.buflist_comp ||
	    wsi->http.comp_ctx.may_have_more) {
		enum lws_write_protocol wp = LWS_WRITE_HTTP;

		lwsl_wsi_info(wsi, "compl comp partial (buflist_comp %p, may %d)",
				   wsi->http.comp_ctx.buflist_comp,
				   wsi->http.comp_ctx.may_have_more);

		if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_write_role_protocol) &&
		    lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_write_role_protocol).
					write_role_protocol(wsi, NULL, 0, &wp) < 0) {
			lwsl_wsi_info(wsi, "signalling to close");
			goto bail_die;
		}
		lws_callback_on_writable(wsi);

		goto bail_ok;
	}
#endif

#ifdef LWS_WITH_CGI
	/*
	 * A cgi connection's wire protocol remains h1 or h2.  He is just
	 * getting his data from his child cgis.
	 */
	if (wsi->http.cgi) {
		/* also one shot */
		if (pollfd)
			if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
				lwsl_wsi_info(wsi, "failed at set pollfd");
				return 1;
			}
		goto user_service_go_again;
	}
#endif

	/* if we got here, we should have wire protocol ops set on the wsi */
	assert(wsi->role_ops);

	if (!lws_rops_fidx(wsi->role_ops, LWS_ROPS_handle_POLLOUT))
		goto bail_ok;

	n = lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_handle_POLLOUT).
							handle_POLLOUT(wsi);
	switch (n) {
	case LWS_HP_RET_BAIL_OK:
		goto bail_ok;
	case LWS_HP_RET_BAIL_DIE:
		goto bail_die;
	case LWS_HP_RET_DROP_POLLOUT:
	case LWS_HP_RET_USER_SERVICE:
		break;
	default:
		assert(0);
	}

	/* one shot */

	if (pollfd) {
		int eff = vwsi->leave_pollout_active;

		if (!eff) {
			if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
				lwsl_wsi_info(wsi, "failed at set pollfd");
				goto bail_die;
			}
		}

		vwsi->handling_pollout = 0;

		/* cannot get leave_pollout_active set after the above */
		if (!eff && wsi->leave_pollout_active) {
			/*
			 * got set in between sampling eff and clearing
			 * handling_pollout; force POLLOUT on
			 */
			lwsl_wsi_debug(wsi, "leave_pollout_active");
			if (lws_change_pollfd(wsi, 0, LWS_POLLOUT)) {
				lwsl_wsi_info(wsi, "failed at set pollfd");
				goto bail_die;
			}
		}

		vwsi->leave_pollout_active = 0;
	}

	if (lwsi_role_client(wsi) && !wsi->hdr_parsing_completed &&
	     lwsi_state(wsi) != LRS_H2_WAITING_TO_SEND_HEADERS &&
	     lwsi_state(wsi) != LRS_ISSUE_HTTP_BODY)
		goto bail_ok;

	if (n == LWS_HP_RET_DROP_POLLOUT)
		goto bail_ok;

#ifdef LWS_WITH_CGI
user_service_go_again:
#endif

	if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_perform_user_POLLOUT)) {
		if (lws_rops_func_fidx(wsi->role_ops,
				       LWS_ROPS_perform_user_POLLOUT).
						perform_user_POLLOUT(wsi) == -1)
			goto bail_die;
		else
			goto bail_ok;
	}

	lwsl_wsi_debug(wsi, "non mux: wsistate 0x%lx, ops %s",
			    (unsigned long)wsi->wsistate, wsi->role_ops->name);

	vwsi = (volatile struct lws *)wsi;
	vwsi->leave_pollout_active = 0;

	n = lws_callback_as_writeable(wsi);
	vwsi->handling_pollout = 0;

	if (vwsi->leave_pollout_active)
		if (lws_change_pollfd(wsi, 0, LWS_POLLOUT))
			goto bail_die;

	return n;

	/*
	 * since these don't disable the POLLOUT, they are always doing the
	 * right thing for leave_pollout_active whether it was set or not.
	 */

bail_ok:
	vwsi->handling_pollout = 0;
	vwsi->leave_pollout_active = 0;

	return 0;

bail_die:
	vwsi->handling_pollout = 0;
	vwsi->leave_pollout_active = 0;

	return -1;
}
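
/*
 * Illustrative sketch (not part of the build): POLLOUT is "one shot" from
 * the user's point of view.  Each lws_callback_on_writable() buys exactly
 * one LWS_CALLBACK_*_WRITEABLE; to send more, ask again from inside the
 * callback.  Fragment from inside a protocol callback's switch (reason);
 * my_state and drain_queue are hypothetical.
 */
#if 0
case LWS_CALLBACK_SERVER_WRITEABLE: {
	struct my_state *ms = (struct my_state *)user;

	if (drain_queue(ms, wsi) < 0)	/* hypothetical: one lws_write() */
		return -1;		/* ask lws to close the wsi */

	if (!ms->queue_empty)
		/* more to send: request another one-shot writeable */
		lws_callback_on_writable(wsi);
	break;
}
#endif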

int
lws_rxflow_cache(struct lws *wsi, unsigned char *buf, size_t n, size_t len)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	uint8_t *buffered;
	size_t blen;
	int ret = LWSRXFC_CACHED, m;

	/* his RX is flowcontrolled, don't send remaining now */
	blen = lws_buflist_next_segment_len(&wsi->buflist, &buffered);
	if (blen) {
		if (buf >= buffered && buf + len <= buffered + blen &&
		    blen != (size_t)len) {
			/*
			 * rxflow while we were spilling prev rxflow
			 *
			 * len indicates how much was unused, then... so trim
			 * the head buflist to match that situation
			 */

			lws_buflist_use_segment(&wsi->buflist, blen - len);
			lwsl_wsi_debug(wsi, "trim existing rxflow %d -> %d",
					    (int)blen, (int)len);

			return LWSRXFC_TRIMMED;
		}
		ret = LWSRXFC_ADDITIONAL;
	}

	/* a new rxflow, buffer it and warn caller */

	lwsl_wsi_debug(wsi, "rxflow append %d", (int)(len - n));
	m = lws_buflist_append_segment(&wsi->buflist, buf + n, len - n);

	if (m < 0)
		return LWSRXFC_ERROR;
	if (m) {
		lwsl_wsi_debug(wsi, "added to rxflow list");
		if (lws_dll2_is_detached(&wsi->dll_buflist))
			lws_dll2_add_head(&wsi->dll_buflist, &pt->dll_buflist_owner);
	}

	return ret;
}
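
/*
 * Illustrative sketch (not part of the build): the buffering above is what
 * makes user-level rx flow control safe.  While rx is disabled, anything
 * already read is parked on the wsi buflist and replayed later.  Fragment
 * from inside a protocol callback's switch (reason).
 */
#if 0
case LWS_CALLBACK_RECEIVE:
	/* stop consuming rx until we catch up; lws caches the surplus */
	lws_rx_flow_control(wsi, 0);

	/* ... later, when ready for more ... */
	lws_rx_flow_control(wsi, 1);
	break;
#endif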

/* this is used by the platform service code to stop us waiting for network
 * activity in poll() when we have something that already needs service
 */

int
lws_service_adjust_timeout(struct lws_context *context, int timeout_ms, int tsi)
{
	struct lws_context_per_thread *pt;

	if (!context)
		return 1;

	if (!context->protocol_init_done)
		if (lws_protocol_init(context))
			return 1;

#if defined(LWS_WITH_SYS_SMD)
	if (!tsi && lws_smd_message_pending(context)) {
		lws_smd_msg_distribute(context);
		if (lws_smd_message_pending(context))
			return 0;
	}
#endif

	pt = &context->pt[tsi];

	if (pt->evlib_pt) {
		lws_usec_t u;

		lws_pt_lock(pt, __func__); /* -------------- pt { */

		u = __lws_sul_service_ripe(pt->pt_sul_owner,
				      LWS_COUNT_PT_SUL_OWNERS, lws_now_usecs());
		/*
		 * We will come back with 0 if nothing to do at the moment, or
		 * the number of us until something to do
		 */
		if (u && u < (lws_usec_t)timeout_ms * (lws_usec_t)1000)
			timeout_ms = (int)(u / 1000);

		lws_pt_unlock(pt);
	}

	/*
	 * Figure out if we really want to wait in poll()... we only need to
	 * wait if there is truly nothing to do already, and we have to wait
	 * for something from the network
	 */
#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
	/* 1) if we know we are draining rx ext, do not wait in poll */
	if (pt->ws.rx_draining_ext_list)
		return 0;
#endif

#if defined(LWS_WITH_TLS)
	/* 2) if we know we have non-network pending data,
	 *    do not wait in poll */

	if (pt->context->tls_ops &&
	    pt->context->tls_ops->fake_POLLIN_for_buffered &&
	    pt->context->tls_ops->fake_POLLIN_for_buffered(pt))
			return 0;
#endif

	/*
	 * 3) If there is any wsi with rxflow buffered and in a state to process
	 *    it, we should not wait in poll
	 */

	lws_start_foreach_dll(struct lws_dll2 *, d, pt->dll_buflist_owner.head) {
		struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);

		if (!lws_is_flowcontrolled(wsi) &&
		     lwsi_state(wsi) != LRS_DEFERRING_ACTION)
			return 0;

	/*
	 * 4) If any guys with http compression to spill, we shouldn't wait in
	 *    poll but hurry along and service them
	 */

	} lws_end_foreach_dll(d);

	return timeout_ms;
}
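
/*
 * Illustrative sketch (not part of the build): an application with its own
 * external poll() loop can use lws_service_adjust_timeout() so it never
 * sleeps past already-pending lws work.  pollfds, count_pollfds and n are
 * the application's own fd table and are assumptions here.
 */
#if 0
/* clamp the 1000ms budget down to 0 if lws already has work pending */
n = poll(pollfds, count_pollfds,
	 lws_service_adjust_timeout(context, 1000, 0));
#endif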

/*
 * POLLIN said there is something... we must read it, and either use it; or
 * if other material already in the buflist append it and return the buflist
 * head material.
 */
int
lws_buflist_aware_read(struct lws_context_per_thread *pt, struct lws *wsi,
		       struct lws_tokens *ebuf, char fr, const char *hint)
{
	int n, e, bns;
	uint8_t *ep, *b;

	// lwsl_debug("%s: %s: %s: prior %d\n", __func__, lws_wsi_tag(wsi), hint, prior);
	// lws_buflist_describe(&wsi->buflist, wsi, __func__);

	(void)hint;
	if (!ebuf->token)
		ebuf->token = pt->serv_buf + LWS_PRE;
	if (!ebuf->len ||
	    (unsigned int)ebuf->len > wsi->a.context->pt_serv_buf_size - LWS_PRE)
		ebuf->len = (int)(wsi->a.context->pt_serv_buf_size - LWS_PRE);

	e = ebuf->len;
	ep = ebuf->token;

	/* h2 or muxed stream... must force the read due to HOL blocking */

	if (wsi->mux_substream)
		fr = 1;

	/* there's something on the buflist? */

	bns = (int)lws_buflist_next_segment_len(&wsi->buflist, &ebuf->token);
	b = ebuf->token;

	if (!fr && bns)
		goto buflist_material;

	/* we're going to read something */

	ebuf->token = ep;
	ebuf->len = n = lws_ssl_capable_read(wsi, ep, (size_t)e);

	lwsl_wsi_debug(wsi, "%s: ssl_capable_read %d", hint, ebuf->len);

	if (!bns && /* don't report the error yet if buflist content remains */
	    n == LWS_SSL_CAPABLE_ERROR) {
		lwsl_debug("%s: SSL_CAPABLE_ERROR\n", __func__);
		return -1;
	}

	if (n <= 0 && bns)
		/*
		 * There wasn't anything to read yet, but there's something
		 * on the buflist to give him
		 */
		goto buflist_material;

	/* we read something */

	if (fr && bns) {
		/*
		 * Stash what we read, since there's earlier buflist material
		 */

		n = lws_buflist_append_segment(&wsi->buflist, ebuf->token, (size_t)ebuf->len);
		if (n < 0)
			return -1;
		if (n && lws_dll2_is_detached(&wsi->dll_buflist))
			lws_dll2_add_head(&wsi->dll_buflist,
					  &pt->dll_buflist_owner);

		goto buflist_material;
	}

	/*
	 * directly return what we read
	 */

	return 0;

buflist_material:

	ebuf->token = b;
	if (e < bns)
		/* restrict to e, if more than e available */
		ebuf->len = e;
	else
		ebuf->len = bns;

	return 1; /* from buflist */
}

int
lws_buflist_aware_finished_consuming(struct lws *wsi, struct lws_tokens *ebuf,
				     int used, int buffered, const char *hint)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	int m;

	/* it's in the buflist; we didn't use any */

	if (!used && buffered)
		return 0;

	if (used && buffered) {
		if (wsi->buflist) {
			m = (int)lws_buflist_use_segment(&wsi->buflist,
							 (size_t)used);
			if (m)
				return 0;
		}

		lwsl_wsi_info(wsi, "removed from dll_buflist");
		lws_dll2_remove(&wsi->dll_buflist);

		return 0;
	}

	/* any remainder goes on the buflist */

	if (used < ebuf->len && ebuf->len >= 0 && used >= 0) {
		m = lws_buflist_append_segment(&wsi->buflist,
					       ebuf->token + used,
					       (unsigned int)(ebuf->len - used));
		if (m < 0)
			return 1; /* OOM */
		if (m) {
			lwsl_wsi_debug(wsi, "added to rxflow list");
			if (lws_dll2_is_detached(&wsi->dll_buflist))
				lws_dll2_add_head(&wsi->dll_buflist,
					 &pt->dll_buflist_owner);
		}
	}

	return 0;
}
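
/*
 * Illustrative sketch (not part of the build): how role code inside lws
 * pairs the two helpers above.  lws_buflist_aware_read() returns 1 when it
 * handed back earlier buflist material; afterwards the role reports how
 * much it actually consumed, so any remainder is re-buffered.  parse_rx()
 * is a hypothetical stand-in for the role's parser.
 */
#if 0
struct lws_tokens ebuf;
int buffered, used;

memset(&ebuf, 0, sizeof(ebuf));
buffered = lws_buflist_aware_read(pt, wsi, &ebuf, 0, __func__);
if (buffered < 0)
	return LWS_HPI_RET_PLEASE_CLOSE_ME;

used = parse_rx(wsi, ebuf.token, ebuf.len);	/* hypothetical */
if (used < 0)
	return LWS_HPI_RET_PLEASE_CLOSE_ME;

if (lws_buflist_aware_finished_consuming(wsi, &ebuf, used, buffered,
					 __func__))
	return LWS_HPI_RET_PLEASE_CLOSE_ME;	/* OOM */
#endif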

void
lws_service_do_ripe_rxflow(struct lws_context_per_thread *pt)
{
	struct lws_pollfd pfd;

	if (!pt->dll_buflist_owner.head)
		return;

	/*
	 * service all guys with pending rxflow that reached a state they can
	 * accept the pending data
	 */

	lws_pt_lock(pt, __func__);

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   pt->dll_buflist_owner.head) {
		struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);

		pfd.events = LWS_POLLIN;
		pfd.revents = LWS_POLLIN;
		pfd.fd = -1;

		lwsl_wsi_debug(wsi, "rxflow processing: fc=%d, 0x%lx",
				    lws_is_flowcontrolled(wsi),
				    (unsigned long)wsi->wsistate);

		if (!lws_is_flowcontrolled(wsi) &&
		    lwsi_state(wsi) != LRS_DEFERRING_ACTION) {
			pt->inside_lws_service = 1;

			if (lws_rops_func_fidx(wsi->role_ops,
					       LWS_ROPS_handle_POLLIN).
						handle_POLLIN(pt, wsi, &pfd) ==
						   LWS_HPI_RET_PLEASE_CLOSE_ME)
				lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
						"close_and_handled");
			pt->inside_lws_service = 0;
		}

	} lws_end_foreach_dll_safe(d, d1);

	lws_pt_unlock(pt);
}

/*
 * guys that need POLLIN service again without waiting for network action
 * can force POLLIN here if not flowcontrolled, so they will get service.
 *
 * Return nonzero if anybody got their POLLIN faked
 */
int
lws_service_flag_pending(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt;
	int forced = 0;

	if (!context)
		return 1;

	pt = &context->pt[tsi];

	lws_pt_lock(pt, __func__);

	/*
	 * 1) If there is any wsi with a buflist and in a state to process
	 *    it, we should not wait in poll
	 */

	lws_start_foreach_dll(struct lws_dll2 *, d, pt->dll_buflist_owner.head) {
		struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);

		if (!lws_is_flowcontrolled(wsi) &&
		     lwsi_state(wsi) != LRS_DEFERRING_ACTION) {
			forced = 1;
			break;
		}
	} lws_end_foreach_dll(d);

#if defined(LWS_ROLE_WS)
	forced |= lws_rops_func_fidx(&role_ops_ws,
				     LWS_ROPS_service_flag_pending).
					service_flag_pending(context, tsi);
#endif

#if defined(LWS_WITH_TLS)
	/*
	 * 2) For all guys with buffered SSL read data already saved up, if they
	 * are not flowcontrolled, fake their POLLIN status so they'll get
	 * service to use up the buffered incoming data, even though their
	 * network socket may have nothing
	 */
	lws_start_foreach_dll_safe(struct lws_dll2 *, p, p1,
			lws_dll2_get_head(&pt->tls.dll_pending_tls_owner)) {
		struct lws *wsi = lws_container_of(p, struct lws,
						   tls.dll_pending_tls);

		if (wsi->position_in_fds_table >= 0) {

			pt->fds[wsi->position_in_fds_table].revents = (short)(
					pt->fds[wsi->position_in_fds_table].revents |
				(pt->fds[wsi->position_in_fds_table].events &
								LWS_POLLIN));
			if (pt->fds[wsi->position_in_fds_table].revents &
								LWS_POLLIN)
				/*
				 * We're not going to remove the wsi from the
				 * pending tls list.  The processing will have
				 * to do it if he exhausts the pending tls.
				 */
				forced = 1;
		}

	} lws_end_foreach_dll_safe(p, p1);
#endif

	lws_pt_unlock(pt);

	return forced;
}

int
lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd,
		   int tsi)
{
	struct lws_context_per_thread *pt;
	struct lws *wsi;
	char cow = 0; /* nonzero if we must re-request a writeable below */

	if (!context || context->service_no_longer_possible)
		return -1;

	pt = &context->pt[tsi];

	if (pt->event_loop_pt_unused)
		return -1;

	if (!pollfd) {
		/*
		 * calling with NULL pollfd for periodic background processing
		 * is no longer needed and is now illegal.
		 */
		assert(pollfd);
		return -1;
	}
	assert(lws_socket_is_valid(pollfd->fd));

	/* we are here to service a socket descriptor */
	wsi = wsi_from_fd(context, pollfd->fd);
	if (!wsi)
		/* not lws connection ... leave revents alone and return */
		return 0;

#if LWS_MAX_SMP > 1
	if (wsi->undergoing_init_from_other_pt)
		/*
		 * Temporary situation that other service thread is initializing
		 * this wsi right now for use on our service thread.
		 */
		return 0;
#endif

	/*
	 * so that caller can tell we handled, past here we need to
	 * zero down pollfd->revents after handling
	 */

	/*
	 * Whatever the situation with buffered rx packets, or explicitly read-
	 * and-buffered rx going to be handled before we want to acknowledge the
	 * socket is gone, any sign of HUP always immediately means no more tx
	 * is possible.
	 */

	if ((pollfd->revents & LWS_POLLHUP) == LWS_POLLHUP) {
		wsi->socket_is_permanently_unusable = 1;

		if (!(pollfd->revents & pollfd->events & LWS_POLLIN)) {

			/* ... there are no pending rx packets waiting... */

			if (!lws_buflist_total_len(&wsi->buflist)) {

				/*
				 * ... nothing stashed in the buflist either,
				 * so acknowledge the wsi is done
				 */

				lwsl_wsi_debug(wsi, "Session Socket %d dead",
						    pollfd->fd);

				goto close_and_handled;
			}

			/*
			 * ... in fact we have some unread rx buffered in the
			 * input buflist.  Hold off the closing a bit...
			 */

			lws_set_timeout(wsi, PENDING_TIMEOUT_CLOSE_ACK, 3);
		}
	}

#ifdef _WIN32
	if (pollfd->revents & LWS_POLLOUT)
		wsi->sock_send_blocking = FALSE;
#endif

#if defined(LWS_WITH_TLS)
	if (lwsi_state(wsi) == LRS_SHUTDOWN &&
	    lws_is_ssl(wsi) && wsi->tls.ssl) {
		switch (__lws_tls_shutdown(wsi)) {
		case LWS_SSL_CAPABLE_DONE:
		case LWS_SSL_CAPABLE_ERROR:
			goto close_and_handled;

		case LWS_SSL_CAPABLE_MORE_SERVICE_READ:
		case LWS_SSL_CAPABLE_MORE_SERVICE_WRITE:
		case LWS_SSL_CAPABLE_MORE_SERVICE:
			goto handled;
		}
	}
#endif

	if ((pollfd->revents & LWS_POLLOUT) == LWS_POLLOUT &&
	    wsi->tls_read_wanted_write) {
		/*
		 * If this wsi has a pending WANT_WRITE from SSL_read(), it has
		 * asked for a callback on writeable so it can retry the read.
		 *
		 * Let's consume the POLLOUT by turning it into a POLLIN, and
		 * setting a flag to request a new writeable
		 */
		wsi->tls_read_wanted_write = 0;
		pollfd->revents &= ~(LWS_POLLOUT);
		pollfd->revents |= LWS_POLLIN;
		cow = 1;
	}

	wsi->could_have_pending = 0; /* clear back-to-back write detection */
	pt->inside_lws_service = 1;

	/* okay, what we came here to do... */

	/* if we got here, we should have wire protocol ops set on the wsi */
	assert(wsi->role_ops);

	// lwsl_notice("%s: %s: wsistate 0x%x\n", __func__, wsi->role_ops->name,
	//	    wsi->wsistate);

	switch (lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_handle_POLLIN).
					       handle_POLLIN(pt, wsi, pollfd)) {
	case LWS_HPI_RET_WSI_ALREADY_DIED:
		pt->inside_lws_service = 0;
		return 1;
	case LWS_HPI_RET_HANDLED:
		break;
	case LWS_HPI_RET_PLEASE_CLOSE_ME:
		//lwsl_notice("%s: %s pollin says please close me\n", __func__,
		//		wsi->role_ops->name);
close_and_handled:
		lwsl_wsi_debug(wsi, "Close and handled");
		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				   "close_and_handled");
#if defined(_DEBUG) && defined(LWS_WITH_LIBUV)
		/*
		 * confirm close has no problem being called again while
		 * it waits for libuv service to complete the first async
		 * close
		 */
		if (!strcmp(context->event_loop_ops->name, "libuv"))
			lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
					   "close_and_handled uv repeat test");
#endif
		/*
		 * pollfd may point to something else after the close
		 * due to pollfd swapping scheme on delete on some platforms
		 * we can't clear revents now because it'd be the wrong guy's
		 * revents
		 */
		pt->inside_lws_service = 0;
		return 1;
	default:
		assert(0);
	}
#if defined(LWS_WITH_TLS)
handled:
#endif
	pollfd->revents = 0;
	if (cow)
		lws_callback_on_writable(wsi);
	pt->inside_lws_service = 0;

	return 0;
}
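
/*
 * Illustrative sketch (not part of the build): an application that owns its
 * own poll() loop hands each fd with nonzero revents to lws_service_fd();
 * fds lws doesn't recognize are left alone with revents intact so the app
 * can service them itself.  pollfds, count_pollfds and i are the
 * application's own and are assumptions here.
 */
#if 0
n = poll(pollfds, count_pollfds,
	 lws_service_adjust_timeout(context, 1000, 0));
if (n < 0)
	return -1;

for (i = 0; i < count_pollfds; i++)
	if (pollfds[i].revents)
		if (lws_service_fd(context, &pollfds[i]) < 0)
			return -1;	/* service no longer possible */
#endif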

int
lws_service_fd(struct lws_context *context, struct lws_pollfd *pollfd)
{
	return lws_service_fd_tsi(context, pollfd, 0);
}

int
lws_service(struct lws_context *context, int timeout_ms)
{
	struct lws_context_per_thread *pt;
	int n;

	if (!context)
		return 1;

	pt = &context->pt[0];
	pt->inside_service = 1;

	if (context->event_loop_ops->run_pt) {
		/* we are configured for an event loop */
		context->event_loop_ops->run_pt(context, 0);

		pt->inside_service = 0;

		return 1;
	}
	n = lws_plat_service(context, timeout_ms);

	if (n != -1)
		pt->inside_service = 0;

	return n;
}
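
/*
 * Illustrative sketch (not part of the build): the canonical default-loop
 * usage as seen in the lws minimal examples.  "interrupted" is assumed to
 * be set from a signal handler, which would also call lws_cancel_service()
 * to wake the wait.
 */
#if 0
int n = 0;

while (n >= 0 && !interrupted)
	n = lws_service(context, 0);

lws_context_destroy(context);
#endif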

int
lws_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
	struct lws_context_per_thread *pt;
	int n;

	if (!context)
		return 1;

	pt = &context->pt[tsi];
	pt->inside_service = 1;
#if LWS_MAX_SMP > 1
	pt->self = pthread_self();
#endif

	if (context->event_loop_ops->run_pt) {
		/* we are configured for an event loop */
		context->event_loop_ops->run_pt(context, tsi);

		pt->inside_service = 0;

		return 1;
	}

	n = _lws_plat_service_tsi(context, timeout_ms, tsi);

	pt->inside_service = 0;

	return n;
}
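
/*
 * Illustrative sketch (not part of the build): with LWS_MAX_SMP > 1, each
 * service thread n services only its own pt via lws_service_tsi().  The
 * globals context and interrupted are assumptions here, as is pt_thread
 * being started once per tsi by the application.
 */
#if 0
#include <libwebsockets.h>
#include <pthread.h>

static void *
pt_thread(void *arg)
{
	int tsi = (int)(intptr_t)arg;

	while (!interrupted)
		if (lws_service_tsi(context, 0, tsi) < 0)
			break;

	return NULL;
}
#endif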