/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "uv-common.h"
#include "heap-inl.h"

#include <assert.h>
#include <limits.h>

#ifdef ASYNC_STACKTRACE
#include "dfx/async_stack/libuv_async_stack.h"
#endif

static struct heap *timer_heap(const uv_loop_t* loop) {
#ifdef _WIN32
  return (struct heap*) loop->timer_heap;
#else
  return (struct heap*) &loop->timer_heap;
#endif
}


static int timer_less_than(const struct heap_node* ha,
                           const struct heap_node* hb) {
  const uv_timer_t* a;
  const uv_timer_t* b;

  a = container_of(ha, uv_timer_t, heap_node);
  b = container_of(hb, uv_timer_t, heap_node);

  if (a->timeout < b->timeout)
    return 1;
  if (b->timeout < a->timeout)
    return 0;

  /* Compare start_id when both have the same timeout. start_id is
   * allocated with loop->timer_counter in uv_timer_start().
   */
  return a->start_id < b->start_id;
}


int uv_timer_init(uv_loop_t* loop, uv_timer_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_TIMER);
  handle->timer_cb = NULL;
  handle->timeout = 0;
  handle->repeat = 0;
  return 0;
}


int uv_timer_start(uv_timer_t* handle,
                   uv_timer_cb cb,
                   uint64_t timeout,
                   uint64_t repeat) {
  uint64_t clamped_timeout;

  if (uv__is_closing(handle) || cb == NULL)
    return UV_EINVAL;

  if (uv__is_active(handle))
    uv_timer_stop(handle);

  /* loop->time + timeout may wrap around; clamp to the maximum value. */
  clamped_timeout = handle->loop->time + timeout;
  if (clamped_timeout < timeout)
    clamped_timeout = (uint64_t) -1;

  handle->timer_cb = cb;
  handle->timeout = clamped_timeout;
  handle->repeat = repeat;
  /* start_id is the second index to be compared in timer_less_than() */
  handle->start_id = handle->loop->timer_counter++;

#ifdef ASYNC_STACKTRACE
  /* Stash the collected async stack id; uv__run_timers() restores it
   * before invoking the timer callback.
   */
  handle->u.reserved[3] = (void*)LibuvCollectAsyncStack();
#endif

  heap_insert(timer_heap(handle->loop),
              (struct heap_node*) &handle->heap_node,
              timer_less_than);
  uv__handle_start(handle);
#ifdef __linux__
  if (uv_check_data_valid((struct uv_loop_data*)handle->loop->data) == 0) {
    uv_async_send(&handle->loop->wq_async);
  }
#endif
  return 0;
}


int uv_timer_stop(uv_timer_t* handle) {
  if (!uv__is_active(handle))
    return 0;

  heap_remove(timer_heap(handle->loop),
              (struct heap_node*) &handle->heap_node,
              timer_less_than);
  uv__handle_stop(handle);

  return 0;
}


int uv_timer_again(uv_timer_t* handle) {
  if (handle->timer_cb == NULL)
    return UV_EINVAL;

  if (handle->repeat) {
    uv_timer_stop(handle);
    uv_timer_start(handle, handle->timer_cb, handle->repeat, handle->repeat);
  }

  return 0;
}


void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat) {
  handle->repeat = repeat;
}


uint64_t uv_timer_get_repeat(const uv_timer_t* handle) {
  return handle->repeat;
}


uint64_t uv_timer_get_due_in(const uv_timer_t* handle) {
  if (handle->loop->time >= handle->timeout)
    return 0;

  return handle->timeout - handle->loop->time;
}


int uv__next_timeout(const uv_loop_t* loop) {
  const struct heap_node* heap_node;
  const uv_timer_t* handle;
  uint64_t diff;

  heap_node = heap_min(timer_heap(loop));
  if (heap_node == NULL)
    return -1; /* block indefinitely */

  handle = container_of(heap_node, uv_timer_t, heap_node);
  if (handle->timeout <= loop->time)
    return 0;

  diff = handle->timeout - loop->time;
  if (diff > INT_MAX)
    diff = INT_MAX;

  return (int) diff;
}


void uv__run_timers(uv_loop_t* loop) {
  struct heap_node* heap_node;
  uv_timer_t* handle;

  for (;;) {
    heap_node = heap_min(timer_heap(loop));
    if (heap_node == NULL)
      break;

    handle = container_of(heap_node, uv_timer_t, heap_node);
    if (handle->timeout > loop->time)
      break;

    uv_timer_stop(handle);
    uv_timer_again(handle);
#ifdef ASYNC_STACKTRACE
    LibuvSetStackId((uint64_t)handle->u.reserved[3]);
#endif
    handle->timer_cb(handle);
  }
}


void uv__timer_close(uv_timer_t* handle) {
  uv_timer_stop(handle);
}