#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>

#ifndef _WIN32
# include <sys/types.h>
#endif /* _WIN32 */

#include "uv.h"
#include "fd_table.h"
#include "path_resolver.h"
#include "wasi_types.h"
#include "wasi_rights.h"
#include "uv_mapping.h"
#include "uvwasi_alloc.h"


static uvwasi_errno_t uvwasi__insert_stdio(uvwasi_t* uvwasi,
                                           struct uvwasi_fd_table_t* table,
                                           const uvwasi_fd_t fd,
                                           const uvwasi_fd_t expected,
                                           const char* name) {
  struct uvwasi_fd_wrap_t* wrap;
  uvwasi_filetype_t type;
  uvwasi_rights_t base;
  uvwasi_rights_t inheriting;
  uvwasi_errno_t err;

  err = uvwasi__get_filetype_by_fd(fd, &type);
  if (err != UVWASI_ESUCCESS)
    return err;

  err = uvwasi__get_rights(fd, UV_FS_O_RDWR, type, &base, &inheriting);
  if (err != UVWASI_ESUCCESS)
    return err;

  err = uvwasi_fd_table_insert(uvwasi,
                               table,
                               fd,
                               NULL,
                               name,
                               name,
                               type,
                               base,
                               inheriting,
                               0,
                               &wrap);
  if (err != UVWASI_ESUCCESS)
    return err;

  if (wrap->id != expected)
    err = UVWASI_EBADF;

  uv_mutex_unlock(&wrap->mutex);
  return err;
}


uvwasi_errno_t uvwasi_fd_table_insert(uvwasi_t* uvwasi,
                                      struct uvwasi_fd_table_t* table,
                                      uv_file fd,
                                      uv_tcp_t* sock,
                                      const char* mapped_path,
                                      const char* real_path,
                                      uvwasi_filetype_t type,
                                      uvwasi_rights_t rights_base,
                                      uvwasi_rights_t rights_inheriting,
                                      int preopen,
                                      struct uvwasi_fd_wrap_t** wrap) {
  struct uvwasi_fd_wrap_t* entry;
  struct uvwasi_fd_wrap_t** new_fds;
  uvwasi_errno_t err;
  uint32_t new_size;
  uint32_t index;
  uint32_t i;
  int r;
  size_t mp_len;
  char* mp_copy;
  size_t rp_len;
  char* rp_copy;
  char* np_copy;

  if (type != UVWASI_FILETYPE_SOCKET_STREAM) {
    mp_len = strlen(mapped_path);
    rp_len = strlen(real_path);
  } else {
    mp_len = 0;
    rp_len = 0;
    rp_copy = NULL;
    mp_copy = NULL;
    np_copy = NULL;
  }

  /* Reserve room for the mapped path, real path, and normalized mapped path. */
  entry = (struct uvwasi_fd_wrap_t*)
    uvwasi__malloc(uvwasi, sizeof(*entry) + mp_len + mp_len + rp_len + 3);
  if (entry == NULL)
    return UVWASI_ENOMEM;

  if (type != UVWASI_FILETYPE_SOCKET_STREAM) {
    mp_copy = (char*)(entry + 1);
    rp_copy = mp_copy + mp_len + 1;
    np_copy = rp_copy + rp_len + 1;
    memcpy(mp_copy, mapped_path, mp_len);
    mp_copy[mp_len] = '\0';
    memcpy(rp_copy, real_path, rp_len);
    rp_copy[rp_len] = '\0';

    /* Calculate the normalized version of the mapped path, as it will be used
       for any path calculations on this fd. Use the length of the mapped path
       as an upper bound for the normalized path length. */
    err = uvwasi__normalize_path(mp_copy, mp_len, np_copy, mp_len);
    if (err) {
      /* The table's rwlock has not been acquired yet, so return directly
         instead of jumping to the unlocking exit path. */
      uvwasi__free(uvwasi, entry);
      return err;
    }
  }

  uv_rwlock_wrlock(&table->rwlock);

  /* Check that there is room for a new item. If there isn't, grow the table. */
  if (table->used >= table->size) {
    new_size = table->size * 2;
    new_fds = uvwasi__realloc(uvwasi, table->fds, new_size * sizeof(*new_fds));
    if (new_fds == NULL) {
      uvwasi__free(uvwasi, entry);
      err = UVWASI_ENOMEM;
      goto exit;
    }

    for (i = table->size; i < new_size; ++i)
      new_fds[i] = NULL;

    index = table->size;
    table->fds = new_fds;
    table->size = new_size;
  } else {
    /* The table is big enough, so find an empty slot for the new data. */
    int valid_slot = 0;
    for (i = 0; i < table->size; ++i) {
      if (table->fds[i] == NULL) {
        valid_slot = 1;
        index = i;
        break;
      }
    }

    /* This should never happen. */
    if (valid_slot == 0) {
      uvwasi__free(uvwasi, entry);
      err = UVWASI_ENOSPC;
      goto exit;
    }
  }

  r = uv_mutex_init(&entry->mutex);
  if (r != 0) {
    /* Don't publish an entry whose mutex could not be initialized. */
    uvwasi__free(uvwasi, entry);
    err = uvwasi__translate_uv_error(r);
    goto exit;
  }

  table->fds[index] = entry;
  entry->id = index;
  entry->fd = fd;
  entry->sock = sock;
  entry->path = mp_copy;
  entry->real_path = rp_copy;
  entry->normalized_path = np_copy;
  entry->type = type;
  entry->rights_base = rights_base;
  entry->rights_inheriting = rights_inheriting;
  entry->preopen = preopen;

  if (wrap != NULL) {
    uv_mutex_lock(&entry->mutex);
    *wrap = entry;
  }

  table->used++;
  err = UVWASI_ESUCCESS;
exit:
  uv_rwlock_wrunlock(&table->rwlock);
  return err;
}
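
/* Illustrative caller pattern (a sketch, not used by the library itself): when
   a non-NULL wrap argument is passed, uvwasi_fd_table_insert() hands back the
   new entry with its mutex already held, and the caller must release it. Here
   dir_fd is assumed to be an already opened host directory descriptor, and the
   "/sandbox" and "/tmp/sandbox" paths are hypothetical.

     struct uvwasi_fd_wrap_t* wrap;
     uvwasi_errno_t err;

     err = uvwasi_fd_table_insert(uvwasi, table, dir_fd, NULL,
                                  "/sandbox", "/tmp/sandbox",
                                  UVWASI_FILETYPE_DIRECTORY,
                                  UVWASI__RIGHTS_DIRECTORY_BASE,
                                  UVWASI__RIGHTS_DIRECTORY_INHERITING,
                                  1, &wrap);
     if (err == UVWASI_ESUCCESS)
       uv_mutex_unlock(&wrap->mutex);
*/
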
uvwasi_errno_t uvwasi_fd_table_init(uvwasi_t* uvwasi,
                                    const uvwasi_options_t* options) {
  struct uvwasi_fd_table_t* table;
  uvwasi_errno_t err;
  int r;

  /* Require an initial size of at least three to store the stdio FDs. */
  if (uvwasi == NULL || options == NULL || options->fd_table_size < 3)
    return UVWASI_EINVAL;

  table = uvwasi__malloc(uvwasi, sizeof(*table));
  if (table == NULL)
    return UVWASI_ENOMEM;

  table->used = 0;
  table->size = options->fd_table_size;
  table->fds = uvwasi__calloc(uvwasi,
                              options->fd_table_size,
                              sizeof(struct uvwasi_fd_wrap_t*));
  if (table->fds == NULL) {
    uvwasi__free(uvwasi, table);
    return UVWASI_ENOMEM;
  }

  r = uv_rwlock_init(&table->rwlock);
  if (r != 0) {
    err = uvwasi__translate_uv_error(r);
    uvwasi__free(uvwasi, table->fds);
    uvwasi__free(uvwasi, table);
    return err;
  }

  /* Create the stdio FDs. */
  err = uvwasi__insert_stdio(uvwasi, table, options->in, 0, "<stdin>");
  if (err != UVWASI_ESUCCESS)
    goto error_exit;

  err = uvwasi__insert_stdio(uvwasi, table, options->out, 1, "<stdout>");
  if (err != UVWASI_ESUCCESS)
    goto error_exit;

  err = uvwasi__insert_stdio(uvwasi, table, options->err, 2, "<stderr>");
  if (err != UVWASI_ESUCCESS)
    goto error_exit;

  uvwasi->fds = table;
  return UVWASI_ESUCCESS;
error_exit:
  uvwasi_fd_table_free(uvwasi, table);
  return err;
}
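
/* Illustrative setup (a sketch): only the option fields consulted above are
   shown, and a real uvwasi_options_t has additional members that callers
   initialize as well. 0, 1, and 2 are the usual host stdio descriptors.

     uvwasi_options_t options;
     options.fd_table_size = 3;
     options.in = 0;
     options.out = 1;
     options.err = 2;
     err = uvwasi_fd_table_init(uvwasi, &options);
*/
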
void uvwasi_fd_table_free(uvwasi_t* uvwasi, struct uvwasi_fd_table_t* table) {
  struct uvwasi_fd_wrap_t* entry;
  uint32_t i;

  if (uvwasi == NULL || table == NULL)
    return;

  for (i = 0; i < table->size; i++) {
    entry = table->fds[i];

    if (entry == NULL)
      continue;

    uv_mutex_destroy(&entry->mutex);
    uvwasi__free(uvwasi, entry);
  }

  if (table->fds != NULL) {
    uvwasi__free(uvwasi, table->fds);
    table->fds = NULL;
    table->size = 0;
    table->used = 0;
    uv_rwlock_destroy(&table->rwlock);
  }

  uvwasi__free(uvwasi, table);
}


uvwasi_errno_t uvwasi_fd_table_insert_preopen(uvwasi_t* uvwasi,
                                              struct uvwasi_fd_table_t* table,
                                              const uv_file fd,
                                              const char* path,
                                              const char* real_path) {
  uvwasi_filetype_t type;
  uvwasi_rights_t base;
  uvwasi_rights_t inheriting;
  uvwasi_errno_t err;

  if (table == NULL || path == NULL || real_path == NULL)
    return UVWASI_EINVAL;

  err = uvwasi__get_filetype_by_fd(fd, &type);
  if (err != UVWASI_ESUCCESS)
    return err;

  if (type != UVWASI_FILETYPE_DIRECTORY)
    return UVWASI_ENOTDIR;

  err = uvwasi__get_rights(fd, 0, type, &base, &inheriting);
  if (err != UVWASI_ESUCCESS)
    return err;

  return uvwasi_fd_table_insert(uvwasi,
                                table,
                                fd,
                                NULL,
                                path,
                                real_path,
                                UVWASI_FILETYPE_DIRECTORY,
                                UVWASI__RIGHTS_DIRECTORY_BASE,
                                UVWASI__RIGHTS_DIRECTORY_INHERITING,
                                1,
                                NULL);
}


uvwasi_errno_t uvwasi_fd_table_insert_preopen_socket(uvwasi_t* uvwasi,
                                                     struct uvwasi_fd_table_t* table,
                                                     uv_tcp_t* sock) {
  if (table == NULL || sock == NULL)
    return UVWASI_EINVAL;

  return uvwasi_fd_table_insert(uvwasi,
                                table,
                                -1,
                                sock,
                                NULL,
                                NULL,
                                UVWASI_FILETYPE_SOCKET_STREAM,
                                UVWASI__RIGHTS_SOCKET_BASE,
                                UVWASI__RIGHTS_SOCKET_INHERITING,
                                1,
                                NULL);
}


uvwasi_errno_t uvwasi_fd_table_get(struct uvwasi_fd_table_t* table,
                                   const uvwasi_fd_t id,
                                   struct uvwasi_fd_wrap_t** wrap,
                                   uvwasi_rights_t rights_base,
                                   uvwasi_rights_t rights_inheriting) {
  uvwasi_errno_t err;

  if (table == NULL)
    return UVWASI_EINVAL;

  uv_rwlock_wrlock(&table->rwlock);
  err = uvwasi_fd_table_get_nolock(table,
                                   id,
                                   wrap,
                                   rights_base,
                                   rights_inheriting);
  uv_rwlock_wrunlock(&table->rwlock);
  return err;
}
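
/* Illustrative lookup (a sketch): the requested rights must be a subset of the
   rights stored on the entry, otherwise UVWASI_ENOTCAPABLE is returned. On
   success the entry's mutex is held and the caller releases it when done. The
   descriptor number 3 and UVWASI_RIGHT_FD_READ are placeholders for whatever
   the caller actually needs.

     struct uvwasi_fd_wrap_t* wrap;
     uvwasi_errno_t err;

     err = uvwasi_fd_table_get(table, 3, &wrap, UVWASI_RIGHT_FD_READ, 0);
     if (err == UVWASI_ESUCCESS) {
       ... use wrap->fd ...
       uv_mutex_unlock(&wrap->mutex);
     }
*/
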
/* uvwasi_fd_table_get_nolock() retrieves a file descriptor and locks its mutex,
   but does not lock the file descriptor table like uvwasi_fd_table_get() does.
*/
uvwasi_errno_t uvwasi_fd_table_get_nolock(struct uvwasi_fd_table_t* table,
                                          const uvwasi_fd_t id,
                                          struct uvwasi_fd_wrap_t** wrap,
                                          uvwasi_rights_t rights_base,
                                          uvwasi_rights_t rights_inheriting) {
  struct uvwasi_fd_wrap_t* entry;

  if (table == NULL || wrap == NULL)
    return UVWASI_EINVAL;

  if (id >= table->size)
    return UVWASI_EBADF;

  entry = table->fds[id];

  if (entry == NULL || entry->id != id)
    return UVWASI_EBADF;

  /* Validate that the fd has the necessary rights. */
  if ((~entry->rights_base & rights_base) != 0 ||
      (~entry->rights_inheriting & rights_inheriting) != 0) {
    return UVWASI_ENOTCAPABLE;
  }

  uv_mutex_lock(&entry->mutex);
  *wrap = entry;
  return UVWASI_ESUCCESS;
}


uvwasi_errno_t uvwasi_fd_table_remove_nolock(uvwasi_t* uvwasi,
                                             struct uvwasi_fd_table_t* table,
                                             const uvwasi_fd_t id) {
  struct uvwasi_fd_wrap_t* entry;

  if (table == NULL)
    return UVWASI_EINVAL;

  if (id >= table->size)
    return UVWASI_EBADF;

  entry = table->fds[id];

  if (entry == NULL || entry->id != id)
    return UVWASI_EBADF;

  uv_mutex_destroy(&entry->mutex);
  uvwasi__free(uvwasi, entry);
  table->fds[id] = NULL;
  table->used--;
  return UVWASI_ESUCCESS;
}


uvwasi_errno_t uvwasi_fd_table_renumber(struct uvwasi_s* uvwasi,
                                        struct uvwasi_fd_table_t* table,
                                        const uvwasi_fd_t dst,
                                        const uvwasi_fd_t src) {
  struct uvwasi_fd_wrap_t* dst_entry;
  struct uvwasi_fd_wrap_t* src_entry;
  uv_fs_t req;
  uvwasi_errno_t err;
  int r;

  if (uvwasi == NULL || table == NULL)
    return UVWASI_EINVAL;

  if (dst == src)
    return UVWASI_ESUCCESS;

  uv_rwlock_wrlock(&table->rwlock);

  if (dst >= table->size || src >= table->size) {
    err = UVWASI_EBADF;
    goto exit;
  }

  dst_entry = table->fds[dst];
  src_entry = table->fds[src];

  if (dst_entry == NULL || dst_entry->id != dst ||
      src_entry == NULL || src_entry->id != src) {
    err = UVWASI_EBADF;
    goto exit;
  }

  uv_mutex_lock(&dst_entry->mutex);
  uv_mutex_lock(&src_entry->mutex);

  /* Close the existing destination descriptor. */
  r = uv_fs_close(NULL, &req, dst_entry->fd, NULL);
  uv_fs_req_cleanup(&req);
  if (r != 0) {
    uv_mutex_unlock(&src_entry->mutex);
    uv_mutex_unlock(&dst_entry->mutex);
    err = uvwasi__translate_uv_error(r);
    goto exit;
  }

  /* Move the source entry to the destination slot in the table. */
  table->fds[dst] = table->fds[src];
  table->fds[dst]->id = dst;
  uv_mutex_unlock(&table->fds[dst]->mutex);
  table->fds[src] = NULL;
  table->used--;

  /* Clean up what's left of the old destination entry. */
  uv_mutex_unlock(&dst_entry->mutex);
  uv_mutex_destroy(&dst_entry->mutex);
  uvwasi__free(uvwasi, dst_entry);

  err = UVWASI_ESUCCESS;
exit:
  uv_rwlock_wrunlock(&table->rwlock);
  return err;
}


uvwasi_errno_t uvwasi_fd_table_lock(struct uvwasi_fd_table_t* table) {
  if (table == NULL)
    return UVWASI_EINVAL;

  uv_rwlock_wrlock(&table->rwlock);
  return UVWASI_ESUCCESS;
}


uvwasi_errno_t uvwasi_fd_table_unlock(struct uvwasi_fd_table_t* table) {
  if (table == NULL)
    return UVWASI_EINVAL;

  uv_rwlock_wrunlock(&table->rwlock);
  return UVWASI_ESUCCESS;
}
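
/* Illustrative composition of the table lock with the *_nolock helpers (a
   sketch, error handling elided): holding the table lock across a lookup
   followed by a removal keeps other threads from reusing the slot in between.
   uvwasi, table, and id are assumed to be valid and in scope.

     struct uvwasi_fd_wrap_t* wrap;
     uvwasi_errno_t err;

     uvwasi_fd_table_lock(table);
     err = uvwasi_fd_table_get_nolock(table, id, &wrap, 0, 0);
     if (err == UVWASI_ESUCCESS) {
       uv_mutex_unlock(&wrap->mutex);
       err = uvwasi_fd_table_remove_nolock(uvwasi, table, id);
     }
     uvwasi_fd_table_unlock(table);
*/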