/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

int
lws_plat_service(struct lws_context *context, int timeout_ms)
{
	int n = _lws_plat_service_tsi(context, timeout_ms, 0);

#if !defined(LWS_AMAZON_RTOS)
	esp_task_wdt_reset();
#endif

	return n;
}


int
_lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
	volatile struct lws_context_per_thread *vpt;
	struct lws_context_per_thread *pt;
	lws_usec_t timeout_us;
	int n = -1, m, c, a = 0;

	/* stay dead once we are dead */

	if (!context)
		return 1;

	pt = &context->pt[tsi];
	vpt = (volatile struct lws_context_per_thread *)pt;

	{
		unsigned long m = lws_now_secs();

		if (m > context->time_last_state_dump) {
			context->time_last_state_dump = m;
#if defined(LWS_ESP_PLATFORM)
			n = esp_get_free_heap_size();
#else
			n = xPortGetFreeHeapSize();
#endif
			if ((unsigned int)n != context->last_free_heap) {
				if ((unsigned int)n > context->last_free_heap)
					lwsl_debug(" heap :%ld (+%ld)\n",
						   (unsigned long)n,
						   (unsigned long)(n -
						      context->last_free_heap));
				else
					lwsl_debug(" heap :%ld (-%ld)\n",
						   (unsigned long)n,
						   (unsigned long)(
						      context->last_free_heap -
						      n));
				context->last_free_heap = n;
			}
		}
	}

	if (timeout_ms < 0)
		timeout_ms = 0;
	else
		/* force a default timeout of 23 days */
		timeout_ms = 2000000000;
	timeout_us = ((lws_usec_t)timeout_ms) * LWS_US_PER_MS;

	if (!pt->service_tid_detected && context->vhost_list) {
		lws_fakewsi_def_plwsa(pt);

		lws_fakewsi_prep_plwsa_ctx(context);

		pt->service_tid = context->vhost_list->protocols[0].callback(
					(struct lws *)plwsa,
					LWS_CALLBACK_GET_THREAD_ID,
					NULL, NULL, 0);
		pt->service_tid_detected = 1;
	}
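
	/*
	 * Illustrative sketch, not part of upstream: a user protocol
	 * callback can answer the LWS_CALLBACK_GET_THREAD_ID probe above
	 * by returning any nonzero, stable, opaque id for the current
	 * task, for example on FreeRTOS:
	 *
	 *	case LWS_CALLBACK_GET_THREAD_ID:
	 *		return (int)(uintptr_t)xTaskGetCurrentTaskHandle();
	 *
	 * lws only compares this id for equality, to tell whether apis
	 * like lws_callback_on_writable() are being called from off the
	 * service thread and so need a forced service wakeup.
	 */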

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
#if !defined(LWS_AMAZON_RTOS)
again:
#endif
	n = 0;
	if (lws_service_adjust_timeout(context, 1, tsi)) {
#if defined(LWS_AMAZON_RTOS)
again:
#endif /* LWS_AMAZON_RTOS */

		a = 0;
		if (timeout_us) {
			lws_usec_t us;

			lws_pt_lock(pt, __func__);
			/* don't stay in poll wait longer than next hr timeout */
			us = __lws_sul_service_ripe(pt->pt_sul_owner,
						    LWS_COUNT_PT_SUL_OWNERS,
						    lws_now_usecs());
			if (us && us < timeout_us)
				timeout_us = us;

			lws_pt_unlock(pt);
		}

		// n = poll(pt->fds, pt->fds_count, timeout_ms);
		{
			fd_set readfds, writefds, errfds;
			struct timeval tv = { timeout_us / LWS_US_PER_SEC,
					      timeout_us % LWS_US_PER_SEC },
				       *ptv = &tv;
			int max_fd = 0;
			FD_ZERO(&readfds);
			FD_ZERO(&writefds);
			FD_ZERO(&errfds);

			for (n = 0; n < (int)pt->fds_count; n++) {
				pt->fds[n].revents = 0;
				if (pt->fds[n].fd >= max_fd)
					max_fd = pt->fds[n].fd;
				if (pt->fds[n].events & LWS_POLLIN)
					FD_SET(pt->fds[n].fd, &readfds);
				if (pt->fds[n].events & LWS_POLLOUT)
					FD_SET(pt->fds[n].fd, &writefds);
				FD_SET(pt->fds[n].fd, &errfds);
			}

			vpt->inside_poll = 1;
			lws_memory_barrier();
			n = select(max_fd + 1, &readfds, &writefds, &errfds,
				   ptv);
			vpt->inside_poll = 0;
			lws_memory_barrier();
			n = 0;

			for (m = 0; m < (int)pt->fds_count; m++) {
				c = 0;
				if (FD_ISSET(pt->fds[m].fd, &readfds)) {
					pt->fds[m].revents |= LWS_POLLIN;
					c = 1;
				}
				if (FD_ISSET(pt->fds[m].fd, &writefds)) {
					pt->fds[m].revents |= LWS_POLLOUT;
					c = 1;
				}
				if (FD_ISSET(pt->fds[m].fd, &errfds)) {
					// lwsl_notice("errfds %d\n", pt->fds[m].fd);
					pt->fds[m].revents |= LWS_POLLHUP;
					c = 1;
				}

				if (c)
					n++;
			}
		}

		m = 0;

#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
		m |= !!pt->ws.rx_draining_ext_list;
#endif

#if defined(LWS_WITH_TLS)
		if (pt->context->tls_ops &&
		    pt->context->tls_ops->fake_POLLIN_for_buffered)
			m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
#endif
		if (!m && !n)
			return 0;
	} else
		a = 1;

	m = lws_service_flag_pending(context, tsi);
	c = m ? -1 : n;

	/* any socket with events to service? */
	for (n = 0; n < (int)pt->fds_count && c; n++) {
		if (!pt->fds[n].revents)
			continue;

		c--;

		m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
		if (m < 0)
			return -1;
		/* if something closed, retry this slot */
		if (m)
			n--;
	}

	if (a)
		goto again;

	return 0;
}
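
/*
 * Usage sketch (illustrative, not part of this file): on this platform the
 * event loop is normally driven from a dedicated FreeRTOS task that keeps
 * calling lws_service(), which lands in lws_plat_service() above.  The task
 * shape below is an assumption for the example, not something this file
 * requires:
 *
 *	static void
 *	service_task(void *param)
 *	{
 *		struct lws_context *cx = (struct lws_context *)param;
 *
 *		// lws_service() returns nonzero on fatal error; the timeout
 *		// argument is effectively ignored above (capped to ~23 days
 *		// and shortened by any pending sul timeouts)
 *		while (!lws_service(cx, 0))
 *			;
 *
 *		lws_context_destroy(cx);
 *		vTaskDelete(NULL);
 *	}
 */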