1/*
2 * libwebsockets - small server side websockets and web server implementation
3 *
4 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25#include "private-lib-core.h"
26
/*
 * Cancel every scheduled event (sul) this wsi owns, so none of them can
 * fire after the wsi starts closing.  "__" prefix: caller is expected to
 * hold the relevant pt lock — NOTE(review): confirm at call sites.
 */
void
__lws_wsi_remove_from_sul(struct lws *wsi)
{
	lws_sul_cancel(&wsi->sul_timeout);	/* lws_set_timeout() timer */
	lws_sul_cancel(&wsi->sul_hrtimer);	/* lws_set_timer_usecs() timer */
	lws_sul_cancel(&wsi->sul_validity);	/* keepalive validity timer */
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
	/* synthetic timed-close used by the fault-injection subsystem */
	lws_sul_cancel(&wsi->sul_fault_timedclose);
#endif
}
37
38/*
39 * hrtimer
40 */
41
42static void
43lws_sul_hrtimer_cb(lws_sorted_usec_list_t *sul)
44{
45	struct lws *wsi = lws_container_of(sul, struct lws, sul_hrtimer);
46
47	if (wsi->a.protocol &&
48	    wsi->a.protocol->callback(wsi, LWS_CALLBACK_TIMER,
49				    wsi->user_space, NULL, 0))
50		__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
51				     "hrtimer cb errored");
52}
53
/*
 * Schedule (or reschedule) this wsi's hrtimer to fire in us microseconds.
 * "__" prefix: caller must already hold the pt lock.
 */
void
__lws_set_timer_usecs(struct lws *wsi, lws_usec_t us)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	wsi->sul_hrtimer.cb = lws_sul_hrtimer_cb;
	/*
	 * Goes on the MISS_IF_SUSPENDED owner list — presumably these events
	 * are skipped while the system is suspended; verify against sul docs.
	 */
	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
			    &wsi->sul_hrtimer, us);
}
63
/*
 * Public API wrapper for __lws_set_timer_usecs().
 * NOTE(review): takes no pt lock itself, unlike lws_set_timeout() below —
 * confirm whether callers are expected to be on the service thread.
 */
void
lws_set_timer_usecs(struct lws *wsi, lws_usec_t usecs)
{
	__lws_set_timer_usecs(wsi, usecs);
}
69
70/*
71 * wsi timeout
72 */
73
/*
 * Sul callback fired when the wsi's pending timeout expires: log what we
 * were waiting for, mark the socket unusable (except for idle keepalive
 * timeouts), inform a still-connecting client of the failure, then close
 * the wsi under the context + pt locks.
 */
static void
lws_sul_wsitimeout_cb(lws_sorted_usec_list_t *sul)
{
	struct lws *wsi = lws_container_of(sul, struct lws, sul_timeout);
	struct lws_context *cx = wsi->a.context;
	struct lws_context_per_thread *pt = &cx->pt[(int)wsi->tsi];

	/* no need to log normal idle keepalive timeout */
//		if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	/* http builds can also report header-parse / ah wait-list state */
	if (wsi->pending_timeout != PENDING_TIMEOUT_USER_OK)
		lwsl_wsi_info(wsi, "TIMEDOUT WAITING %d, dhdr %d, ah %p, wl %d",
				   wsi->pending_timeout,
				   wsi->hdr_parsing_completed, wsi->http.ah,
				   pt->http.ah_wait_list_length);
#if defined(LWS_WITH_CGI)
	if (wsi->http.cgi)
		lwsl_wsi_notice(wsi, "CGI timeout: %s", wsi->http.cgi->summary);
#endif
#else
	if (wsi->pending_timeout != PENDING_TIMEOUT_USER_OK)
		lwsl_wsi_info(wsi, "TIMEDOUT WAITING on %d ",
				   wsi->pending_timeout);
#endif
	/* cgi timeout */
	if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
		/*
		 * Since he failed a timeout, he already had a chance to
		 * do something and was unable to... that includes
		 * situations like half closed connections.  So process
		 * this "failed timeout" close as a violent death and
		 * don't try to do protocol cleanup like flush partials.
		 */
		wsi->socket_is_permanently_unusable = 1;
#if defined(LWS_WITH_CLIENT)
	/* a client still in connection setup learns why it failed */
	if (lwsi_state(wsi) == LRS_WAITING_SSL)
		lws_inform_client_conn_fail(wsi,
			(void *)"Timed out waiting SSL", 21);
	if (lwsi_state(wsi) == LRS_WAITING_SERVER_REPLY)
		lws_inform_client_conn_fail(wsi,
			(void *)"Timed out waiting server reply", 30);
#endif

	/* the close path requires both the context and pt locks held */
	lws_context_lock(cx, __func__);
	lws_pt_lock(pt, __func__);
	__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "timeout");
	lws_pt_unlock(pt);
	lws_context_unlock(cx);
}
123
124void
125__lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
126{
127	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
128
129	wsi->sul_timeout.cb = lws_sul_wsitimeout_cb;
130	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
131			    &wsi->sul_timeout,
132			    ((lws_usec_t)secs) * LWS_US_PER_SEC);
133
134	lwsl_wsi_debug(wsi, "%d secs, reason %d\n", secs, reason);
135
136	wsi->pending_timeout = (char)reason;
137}
138
/*
 * Public API: set, replace or cancel the wsi's pending timeout.
 *
 * secs == 0 cancels any existing timeout.  LWS_TO_KILL_SYNC closes the
 * wsi immediately from this call; LWS_TO_KILL_ASYNC arms a 0s timeout so
 * the close happens on the next event loop pass.  Takes the context and
 * pt locks itself — careful: the lock/unlock ordering below is deliberate.
 */
void
lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);
	/* detach any timeout already pending before deciding what to do */
	lws_dll2_remove(&wsi->sul_timeout.list);
	lws_pt_unlock(pt);

	if (!secs)
		goto bail;	/* cancel only */

	if (secs == LWS_TO_KILL_SYNC) {
		lwsl_wsi_debug(wsi, "TO_KILL_SYNC");
		/* drop the context lock before the closing path retakes it */
		lws_context_unlock(pt->context);
		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				   "to sync kill");
		return;
	}

	if (secs == LWS_TO_KILL_ASYNC)
		secs = 0;	/* fire on next event loop pass */

	// assert(!secs || !wsi->mux_stream_immortal);
	/* immortal mux streams should not have timeouts imposed on them */
	if (secs && wsi->mux_stream_immortal)
		lwsl_wsi_err(wsi, "on immortal stream %d %d", reason, secs);

	lws_pt_lock(pt, __func__);
	__lws_set_timeout(wsi, reason, secs);
	lws_pt_unlock(pt);

bail:
	lws_context_unlock(pt->context);
}
174
/*
 * Public API: like lws_set_timeout() but with microsecond resolution.
 * us == 0 just cancels any pending timeout.  No LWS_TO_KILL_* handling
 * here, and unlike lws_set_timeout() only the pt lock is taken.
 */
void
lws_set_timeout_us(struct lws *wsi, enum pending_timeout reason, lws_usec_t us)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	lws_pt_lock(pt, __func__);
	/* detach any timeout already pending */
	lws_dll2_remove(&wsi->sul_timeout.list);
	lws_pt_unlock(pt);

	if (!us)
		return;	/* cancel only */

	lws_pt_lock(pt, __func__);
	/* NOTE(review): sul_timeout.cb is not set here — it presumably was
	 * already set by an earlier __lws_set_timeout(); confirm */
	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
			    &wsi->sul_timeout, us);

	lwsl_wsi_notice(wsi, "%llu us, reason %d",
			     (unsigned long long)us, reason);

	wsi->pending_timeout = (char)reason;
	lws_pt_unlock(pt);
}
197
198static void
199lws_validity_cb(lws_sorted_usec_list_t *sul)
200{
201	struct lws *wsi = lws_container_of(sul, struct lws, sul_validity);
202	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
203	const lws_retry_bo_t *rbo = wsi->retry_policy;
204
205	/* one of either the ping or hangup validity threshold was crossed */
206
207	if (wsi->validity_hup) {
208		lwsl_wsi_info(wsi, "validity too old");
209		struct lws_context *cx = wsi->a.context;
210		struct lws_context_per_thread *pt = &cx->pt[(int)wsi->tsi];
211
212		lws_context_lock(cx, __func__);
213		lws_pt_lock(pt, __func__);
214		__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
215				     "validity timeout");
216		lws_pt_unlock(pt);
217		lws_context_unlock(cx);
218		return;
219	}
220
221	/* schedule a protocol-dependent ping */
222
223	lwsl_wsi_info(wsi, "scheduling validity check");
224
225	if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_issue_keepalive))
226		lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_issue_keepalive).
227							issue_keepalive(wsi, 0);
228
229	/*
230	 * We arrange to come back here after the additional ping to hangup time
231	 * and do the hangup, unless we get validated (by, eg, a PONG) and
232	 * reset the timer
233	 */
234
235	assert(rbo->secs_since_valid_hangup > rbo->secs_since_valid_ping);
236
237	wsi->validity_hup = 1;
238	__lws_sul_insert_us(&pt->pt_sul_owner[!!wsi->conn_validity_wakesuspend],
239			    &wsi->sul_validity,
240			    ((uint64_t)rbo->secs_since_valid_hangup -
241				 rbo->secs_since_valid_ping) * LWS_US_PER_SEC);
242}
243
244/*
245 * The role calls this back to actually confirm validity on a particular wsi
246 * (which may not be the original wsi)
247 */
248
249void
250_lws_validity_confirmed_role(struct lws *wsi)
251{
252	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
253	const lws_retry_bo_t *rbo = wsi->retry_policy;
254
255	if (!rbo || !rbo->secs_since_valid_hangup)
256		return;
257
258	wsi->validity_hup = 0;
259	wsi->sul_validity.cb = lws_validity_cb;
260
261	wsi->validity_hup = rbo->secs_since_valid_ping >=
262			    rbo->secs_since_valid_hangup;
263
264	lwsl_wsi_info(wsi, "setting validity timer %ds (hup %d)",
265			   wsi->validity_hup ? rbo->secs_since_valid_hangup :
266					    rbo->secs_since_valid_ping,
267			   wsi->validity_hup);
268
269	__lws_sul_insert_us(&pt->pt_sul_owner[!!wsi->conn_validity_wakesuspend],
270			    &wsi->sul_validity,
271			    ((uint64_t)(wsi->validity_hup ?
272				rbo->secs_since_valid_hangup :
273				rbo->secs_since_valid_ping)) * LWS_US_PER_SEC);
274}
275
276void
277lws_validity_confirmed(struct lws *wsi)
278{
279	/*
280	 * This may be a stream inside a muxed network connection... leave it
281	 * to the role to figure out who actually needs to understand their
282	 * validity was confirmed.
283	 */
284	if (!wsi->h2_stream_carries_ws && /* only if not encapsulated */
285	    wsi->role_ops &&
286	    lws_rops_fidx(wsi->role_ops, LWS_ROPS_issue_keepalive))
287		lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_issue_keepalive).
288							issue_keepalive(wsi, 1);
289}
290