/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <signal.h>
#include <stdarg.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>
#include <dlfcn.h>
#include "drm-uapi/i915_drm.h"
#include <inttypes.h>

#include "intel_aub.h"
#include "aub_write.h"

#include "dev/intel_debug.h"
#include "dev/intel_device_info.h"
#include "util/macros.h"

static int close_init_helper(int fd);
static int ioctl_init_helper(int fd, unsigned long request, ...);
static int munmap_init_helper(void *addr, size_t length);

static int (*libc_close)(int fd) = close_init_helper;
static int (*libc_ioctl)(int fd, unsigned long request, ...) = ioctl_init_helper;
static int (*libc_munmap)(void *addr, size_t length) = munmap_init_helper;

static int drm_fd = -1;
static char *output_filename = NULL;
static FILE *output_file = NULL;
static int verbose = 0;
static bool device_override = false;
static bool capture_only = false;
static int64_t frame_id = -1;
static bool capture_finished = false;

#define MAX_FD_COUNT 64
#define MAX_BO_COUNT 64 * 1024

struct bo {
   uint32_t size;
   uint64_t offset;
   void *map;
   /* Whether the buffer has been positioned in the GTT already. */
   bool gtt_mapped : 1;
   /* Tracks userspace mmapping of the buffer */
   bool user_mapped : 1;
   /* Whether the buffer content has been updated through the i915-gem mmap
    * ioctls since it was last written out by the execbuffer path (cleared
    * there unless the buffer is still user mapped).
    */
   bool dirty : 1;
};

static struct bo *bos;

#define DRM_MAJOR 226

/* We set bit 0 in the map pointer for userptr BOs so we know not to
 * munmap them on DRM_IOCTL_GEM_CLOSE.
 */
#define USERPTR_FLAG 1
#define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
#define GET_PTR(p) ( (void *) ((uintptr_t) p & ~(uintptr_t) 1) )

#define fail_if(cond, ...) _fail_if(cond, "intel_dump_gpu", __VA_ARGS__)

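/* BO lookup: "bos" (allocated lazily in maybe_init()) is a flat table of
 * MAX_FD_COUNT * MAX_BO_COUNT entries indexed by handle + fd * MAX_BO_COUNT.
 * As an illustration only: GEM handle 5 seen on DRM file descriptor 3 would
 * land in slot 5 + 3 * 65536.
 */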
static struct bo *
get_bo(unsigned fd, uint32_t handle)
{
   struct bo *bo;

   fail_if(handle >= MAX_BO_COUNT, "bo handle too large\n");
   fail_if(fd >= MAX_FD_COUNT, "bo fd too large\n");
   bo = &bos[handle + fd * MAX_BO_COUNT];

   return bo;
}

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   return (v + a - 1) & ~(a - 1);
}

static struct intel_device_info devinfo = {0};
static int device = 0;
static struct aub_file aub_file;

static void
ensure_device_info(int fd)
{
   /* We can't do this at open time as we're not yet authenticated. */
   if (device == 0) {
      fail_if(!intel_get_device_info_from_fd(fd, &devinfo),
              "failed to identify chipset.\n");
      device = devinfo.pci_device_id;
   } else if (devinfo.ver == 0) {
      fail_if(!intel_get_device_info_from_pci_id(device, &devinfo),
              "failed to identify chipset.\n");
   }
}

static void *
relocate_bo(int fd, struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
            const struct drm_i915_gem_exec_object2 *obj)
{
   const struct drm_i915_gem_exec_object2 *exec_objects =
      (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
   const struct drm_i915_gem_relocation_entry *relocs =
      (const struct drm_i915_gem_relocation_entry *) (uintptr_t) obj->relocs_ptr;
   void *relocated;
   int handle;

   relocated = malloc(bo->size);
   fail_if(relocated == NULL, "out of memory\n");
   memcpy(relocated, GET_PTR(bo->map), bo->size);
   for (size_t i = 0; i < obj->relocation_count; i++) {
      fail_if(relocs[i].offset >= bo->size, "reloc outside bo\n");

      if (execbuffer2->flags & I915_EXEC_HANDLE_LUT)
         handle = exec_objects[relocs[i].target_handle].handle;
      else
         handle = relocs[i].target_handle;

      aub_write_reloc(&devinfo, ((char *)relocated) + relocs[i].offset,
                      get_bo(fd, handle)->offset + relocs[i].delta);
   }

   return relocated;
}

static int
gem_ioctl(int fd, unsigned long request, void *argp)
{
   int ret;

   do {
      ret = libc_ioctl(fd, request, argp);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

static void *
gem_mmap(int fd, uint32_t handle, uint64_t offset, uint64_t size)
{
   struct drm_i915_gem_mmap mmap = {
      .handle = handle,
      .offset = offset,
      .size = size
   };

   if (gem_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap) == -1)
      return MAP_FAILED;

   return (void *)(uintptr_t) mmap.addr_ptr;
}

static enum drm_i915_gem_engine_class
engine_class_from_ring_flag(uint32_t ring_flag)
{
   switch (ring_flag) {
   case I915_EXEC_DEFAULT:
   case I915_EXEC_RENDER:
      return I915_ENGINE_CLASS_RENDER;
   case I915_EXEC_BSD:
      return I915_ENGINE_CLASS_VIDEO;
   case I915_EXEC_BLT:
      return I915_ENGINE_CLASS_COPY;
   case I915_EXEC_VEBOX:
      return I915_ENGINE_CLASS_VIDEO_ENHANCE;
   default:
      return I915_ENGINE_CLASS_INVALID;
   }
}

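/* Records one execbuffer2 submission into the AUB file: assign (or read back)
 * a GTT offset for every BO, map the BOs into the PPGTT when running in
 * execlist mode, write the contents of dirty buffers as trace blocks, and
 * finally emit the batch execution itself. This is a descriptive summary of
 * the code below, not additional behaviour.
 */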
static void
dump_execbuffer2(int fd, struct drm_i915_gem_execbuffer2 *execbuffer2)
{
   struct drm_i915_gem_exec_object2 *exec_objects =
      (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
   uint32_t ring_flag = execbuffer2->flags & I915_EXEC_RING_MASK;
   uint32_t offset;
   struct drm_i915_gem_exec_object2 *obj;
   struct bo *bo, *batch_bo;
   int batch_index;
   void *data;

   ensure_device_info(fd);

   if (capture_finished)
      return;

   if (!aub_file.file) {
      aub_file_init(&aub_file, output_file,
                    verbose == 2 ? stdout : NULL,
                    device, program_invocation_short_name);
      aub_write_default_setup(&aub_file);

      if (verbose)
         printf("[running, output file %s, chipset id 0x%04x, gen %d]\n",
                output_filename, device, devinfo.ver);
   }

   if (aub_use_execlists(&aub_file))
      offset = 0x1000;
   else
      offset = aub_gtt_size(&aub_file);

   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(fd, obj->handle);

      /* If bo->size == 0, this means they passed us an invalid
       * buffer. The kernel will reject it and so should we.
       */
      if (bo->size == 0) {
         if (verbose)
            printf("BO #%d is invalid!\n", obj->handle);
         return;
      }

      if (obj->flags & EXEC_OBJECT_PINNED) {
         if (bo->offset != obj->offset)
            bo->gtt_mapped = false;
         bo->offset = obj->offset;
      } else {
         if (obj->alignment != 0)
            offset = align_u32(offset, obj->alignment);
         bo->offset = offset;
         offset = align_u32(offset + bo->size + 4095, 4096);
      }

      if (bo->map == NULL && bo->size > 0)
         bo->map = gem_mmap(fd, obj->handle, 0, bo->size);
      fail_if(bo->map == MAP_FAILED, "bo mmap failed\n");
   }

   uint64_t current_frame_id = 0;
   if (frame_id >= 0) {
      for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
         obj = &exec_objects[i];
         bo = get_bo(fd, obj->handle);

         /* Check against frame_id requirements. */
         if (memcmp(bo->map, intel_debug_identifier(),
                    intel_debug_identifier_size()) == 0) {
            const struct intel_debug_block_frame *frame_desc =
               intel_debug_get_identifier_block(bo->map, bo->size,
                                                INTEL_DEBUG_BLOCK_TYPE_FRAME);

            current_frame_id = frame_desc ? frame_desc->frame_id : 0;
            break;
         }
      }
   }

   if (verbose)
      printf("Dumping execbuffer2 (frame_id=%"PRIu64", buffers=%u):\n",
             current_frame_id, execbuffer2->buffer_count);

   /* Check whether we can stop right now. */
   if (frame_id >= 0) {
      if (current_frame_id < frame_id)
         return;

      if (current_frame_id > frame_id) {
         aub_file_finish(&aub_file);
         capture_finished = true;
         return;
      }
   }

   /* Map buffers into the PPGTT. */
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(fd, obj->handle);

      if (verbose) {
         printf("BO #%d (%dB) @ 0x%" PRIx64 "\n",
                obj->handle, bo->size, bo->offset);
      }

      if (aub_use_execlists(&aub_file) && !bo->gtt_mapped) {
         aub_map_ppgtt(&aub_file, bo->offset, bo->size);
         bo->gtt_mapped = true;
      }
   }

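   /* Note on the write loop below: when capture_only is set (see the
    * INTEL_DUMP_GPU_CONFIG parsing in maybe_init()), only BOs flagged with
    * EXEC_OBJECT_CAPTURE have their contents written to the AUB; every BO
    * still receives an address and a PPGTT mapping above.
    */
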
   /* Write the buffer content into the Aub. */
   batch_index = (execbuffer2->flags & I915_EXEC_BATCH_FIRST) ? 0 :
                 execbuffer2->buffer_count - 1;
   batch_bo = get_bo(fd, exec_objects[batch_index].handle);
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(fd, obj->handle);

      if (obj->relocation_count > 0)
         data = relocate_bo(fd, bo, execbuffer2, obj);
      else
         data = bo->map;

      bool write = !capture_only || (obj->flags & EXEC_OBJECT_CAPTURE);

      if (write && bo->dirty) {
         if (bo == batch_bo) {
            aub_write_trace_block(&aub_file, AUB_TRACE_TYPE_BATCH,
                                  GET_PTR(data), bo->size, bo->offset);
         } else {
            aub_write_trace_block(&aub_file, AUB_TRACE_TYPE_NOTYPE,
                                  GET_PTR(data), bo->size, bo->offset);
         }

         if (!bo->user_mapped)
            bo->dirty = false;
      }

      if (data != bo->map)
         free(data);
   }

   uint32_t ctx_id = execbuffer2->rsvd1;

   aub_write_exec(&aub_file, ctx_id,
                  batch_bo->offset + execbuffer2->batch_start_offset,
                  offset, engine_class_from_ring_flag(ring_flag));

   if (device_override &&
       (execbuffer2->flags & I915_EXEC_FENCE_ARRAY) != 0) {
      struct drm_i915_gem_exec_fence *fences =
         (void*)(uintptr_t)execbuffer2->cliprects_ptr;
      for (uint32_t i = 0; i < execbuffer2->num_cliprects; i++) {
         if ((fences[i].flags & I915_EXEC_FENCE_SIGNAL) != 0) {
            struct drm_syncobj_array arg = {
               .handles = (uintptr_t)&fences[i].handle,
               .count_handles = 1,
               .pad = 0,
            };
            libc_ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &arg);
         }
      }
   }
}

static void
add_new_bo(unsigned fd, int handle, uint64_t size, void *map)
{
   struct bo *bo = &bos[handle + fd * MAX_BO_COUNT];

   fail_if(handle >= MAX_BO_COUNT, "bo handle out of range\n");
   fail_if(fd >= MAX_FD_COUNT, "bo fd out of range\n");
   fail_if(size == 0, "bo size is invalid\n");

   bo->size = size;
   bo->map = map;
   bo->user_mapped = false;
   bo->gtt_mapped = false;
}

static void
remove_bo(int fd, int handle)
{
   struct bo *bo = get_bo(fd, handle);

   if (bo->map && !IS_USERPTR(bo->map))
      munmap(bo->map, bo->size);
   memset(bo, 0, sizeof(*bo));
}

__attribute__ ((visibility ("default"))) int
close(int fd)
{
   if (fd == drm_fd)
      drm_fd = -1;

   return libc_close(fd);
}

static int
get_pci_id(int fd, int *pci_id)
{
   struct drm_i915_getparam gparam;

   if (device_override) {
      *pci_id = device;
      return 0;
   }

   gparam.param = I915_PARAM_CHIPSET_ID;
   gparam.value = pci_id;
   return libc_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gparam);
}

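/* maybe_init() below reads the configuration handed over through the
 * INTEL_DUMP_GPU_CONFIG environment variable, one key=value pair per line.
 * A hypothetical config file, covering the keys the parser understands,
 * could look like:
 *
 *    verbose=1
 *    file=capture.aub
 *    capture_only=1
 *    frame=2
 *    platform=tgl
 *
 * (All names and values above are illustrative; "device=<pci-id>" is
 * accepted as an alternative to "platform", but not together with it.)
 */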
static void
maybe_init(int fd)
{
   static bool initialized = false;
   FILE *config;
   char *key, *value;

   if (initialized)
      return;

   initialized = true;

   const char *config_path = getenv("INTEL_DUMP_GPU_CONFIG");
   fail_if(config_path == NULL, "INTEL_DUMP_GPU_CONFIG is not set\n");

   config = fopen(config_path, "r");
   fail_if(config == NULL, "failed to open file %s\n", config_path);

   while (fscanf(config, "%m[^=]=%m[^\n]\n", &key, &value) != EOF) {
      if (!strcmp(key, "verbose")) {
         if (!strcmp(value, "1")) {
            verbose = 1;
         } else if (!strcmp(value, "2")) {
            verbose = 2;
         }
      } else if (!strcmp(key, "device")) {
         fail_if(device != 0, "Device/Platform override specified multiple times.\n");
         fail_if(sscanf(value, "%i", &device) != 1,
                 "failed to parse device id '%s'\n",
                 value);
         device_override = true;
      } else if (!strcmp(key, "platform")) {
         fail_if(device != 0, "Device/Platform override specified multiple times.\n");
         device = intel_device_name_to_pci_device_id(value);
         fail_if(device == -1, "Unknown platform '%s'\n", value);
         device_override = true;
      } else if (!strcmp(key, "file")) {
         free(output_filename);
         if (output_file)
            fclose(output_file);
         output_filename = strdup(value);
         output_file = fopen(output_filename, "w+");
         fail_if(output_file == NULL,
                 "failed to open file '%s'\n",
                 output_filename);
      } else if (!strcmp(key, "capture_only")) {
         capture_only = atoi(value);
      } else if (!strcmp(key, "frame")) {
         frame_id = atol(value);
      } else {
         fprintf(stderr, "unknown option '%s'\n", key);
      }

      free(key);
      free(value);
   }
   fclose(config);

   bos = calloc(MAX_FD_COUNT * MAX_BO_COUNT, sizeof(bos[0]));
   fail_if(bos == NULL, "out of memory\n");

   ASSERTED int ret = get_pci_id(fd, &device);
   assert(ret == 0);

   aub_file_init(&aub_file, output_file,
                 verbose == 2 ? stdout : NULL,
                 device, program_invocation_short_name);
   aub_write_default_setup(&aub_file);

   if (verbose)
      printf("[running, output file %s, chipset id 0x%04x, gen %d]\n",
             output_filename, device, devinfo.ver);
}

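/* Exported ioctl() wrapper. A short orientation note for the switch below:
 * DRM ioctls on the intercepted fd are inspected so that GEM buffer creation,
 * mmap and close keep the "bos" table in sync, execbuffer2 submissions get
 * dumped to the AUB file, and, when a device/platform override is active, a
 * handful of queries are answered from the faked devinfo instead of the
 * kernel. Everything else is passed straight through to libc's ioctl.
 */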
__attribute__ ((visibility ("default"))) int
ioctl(int fd, unsigned long request, ...)
{
   va_list args;
   void *argp;
   int ret;
   struct stat buf;

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   if (_IOC_TYPE(request) == DRM_IOCTL_BASE &&
       drm_fd != fd && fstat(fd, &buf) == 0 &&
       (buf.st_mode & S_IFMT) == S_IFCHR && major(buf.st_rdev) == DRM_MAJOR) {
      drm_fd = fd;
      if (verbose)
         printf("[intercept drm ioctl on fd %d]\n", fd);
   }

   if (fd == drm_fd) {
      maybe_init(fd);

      switch (request) {
      case DRM_IOCTL_SYNCOBJ_WAIT:
      case DRM_IOCTL_I915_GEM_WAIT: {
         if (device_override)
            return 0;
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GET_RESET_STATS: {
         if (device_override) {
            struct drm_i915_reset_stats *stats = argp;

            stats->reset_count = 0;
            stats->batch_active = 0;
            stats->batch_pending = 0;
            return 0;
         }
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GETPARAM: {
         struct drm_i915_getparam *getparam = argp;

         ensure_device_info(fd);

         if (getparam->param == I915_PARAM_CHIPSET_ID)
            return get_pci_id(fd, getparam->value);

         if (device_override) {
            switch (getparam->param) {
            case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
               *getparam->value = devinfo.timestamp_frequency;
               return 0;

            case I915_PARAM_HAS_WAIT_TIMEOUT:
            case I915_PARAM_HAS_EXECBUF2:
            case I915_PARAM_MMAP_VERSION:
            case I915_PARAM_HAS_EXEC_ASYNC:
            case I915_PARAM_HAS_EXEC_FENCE:
            case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
               *getparam->value = 1;
               return 0;

            case I915_PARAM_HAS_EXEC_SOFTPIN:
               *getparam->value = devinfo.ver >= 8 && devinfo.platform != INTEL_PLATFORM_CHV;
               return 0;

            default:
               return -1;
            }
         }

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM: {
         struct drm_i915_gem_context_param *getparam = argp;

         ensure_device_info(fd);

         if (device_override) {
            switch (getparam->param) {
            case I915_CONTEXT_PARAM_GTT_SIZE:
               if (devinfo.platform == INTEL_PLATFORM_EHL)
                  getparam->value = 1ull << 36;
               else if (devinfo.ver >= 8 &&
                        devinfo.platform != INTEL_PLATFORM_CHV)
                  getparam->value = 1ull << 48;
               else
                  getparam->value = 1ull << 31;
               return 0;

            default:
               return -1;
            }
         }

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_EXECBUFFER: {
         static bool once;
         if (!once) {
            fprintf(stderr,
                    "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
            once = true;
         }
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_EXECBUFFER2:
      case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR: {
         dump_execbuffer2(fd, argp);
         if (device_override)
            return 0;

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_CREATE: {
         uint32_t *ctx_id = NULL;
         struct drm_i915_gem_context_create *create = argp;
         ret = 0;
         if (!device_override) {
            ret = libc_ioctl(fd, request, argp);
            ctx_id = &create->ctx_id;
         }

         if (ret == 0)
            create->ctx_id = aub_write_context_create(&aub_file, ctx_id);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT: {
         uint32_t *ctx_id = NULL;
         struct drm_i915_gem_context_create_ext *create = argp;
         ret = 0;
         if (!device_override) {
            ret = libc_ioctl(fd, request, argp);
            ctx_id = &create->ctx_id;
         }

         if (ret == 0)
            create->ctx_id = aub_write_context_create(&aub_file, ctx_id);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_CREATE: {
         struct drm_i915_gem_create *create = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, create->handle, create->size, NULL);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_CREATE_EXT: {
         struct drm_i915_gem_create_ext *create = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, create->handle, create->size, NULL);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_USERPTR: {
         struct drm_i915_gem_userptr *userptr = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, userptr->handle, userptr->user_size,
                       (void *) (uintptr_t) (userptr->user_ptr | USERPTR_FLAG));

         return ret;
      }

      case DRM_IOCTL_GEM_CLOSE: {
         struct drm_gem_close *close = argp;

         remove_bo(fd, close->handle);

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_GEM_OPEN: {
         struct drm_gem_open *open = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, open->handle, open->size, NULL);

         return ret;
      }

      case DRM_IOCTL_PRIME_FD_TO_HANDLE: {
         struct drm_prime_handle *prime = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0) {
            off_t size;

            size = lseek(prime->fd, 0, SEEK_END);
            fail_if(size == -1, "failed to get prime bo size\n");
            add_new_bo(fd, prime->handle, size, NULL);
         }

         return ret;
      }

      case DRM_IOCTL_I915_GEM_MMAP: {
         ret = libc_ioctl(fd, request, argp);
         if (ret == 0) {
            struct drm_i915_gem_mmap *mmap = argp;
            struct bo *bo = get_bo(fd, mmap->handle);
            bo->user_mapped = true;
            bo->dirty = true;
         }
         return ret;
      }

      case DRM_IOCTL_I915_GEM_MMAP_OFFSET: {
         ret = libc_ioctl(fd, request, argp);
         if (ret == 0) {
            struct drm_i915_gem_mmap_offset *mmap = argp;
            struct bo *bo = get_bo(fd, mmap->handle);
            bo->user_mapped = true;
            bo->dirty = true;
         }
         return ret;
      }

      default:
         return libc_ioctl(fd, request, argp);
      }
   } else {
      return libc_ioctl(fd, request, argp);
   }
}

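/* The exported close()/ioctl() wrappers above are meant to be injected into
 * the target process with LD_PRELOAD (typically set up by Mesa's
 * intel_dump_gpu wrapper script), together with INTEL_DUMP_GPU_CONFIG
 * pointing at the config file parsed in maybe_init(). A rough, illustrative
 * invocation:
 *
 *    INTEL_DUMP_GPU_CONFIG=/tmp/dump.cfg \
 *    LD_PRELOAD=<path-to-this-library>.so ./my_gpu_app
 *
 * init() resolves the real libc entry points the first time one of the
 * wrapped functions is called.
 */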
static void
init(void)
{
   libc_close = dlsym(RTLD_NEXT, "close");
   libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
   libc_munmap = dlsym(RTLD_NEXT, "munmap");
   fail_if(libc_close == NULL || libc_ioctl == NULL || libc_munmap == NULL,
           "failed to get libc close, ioctl or munmap\n");
}

static int
close_init_helper(int fd)
{
   init();
   return libc_close(fd);
}

static int
ioctl_init_helper(int fd, unsigned long request, ...)
{
   va_list args;
   void *argp;

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   init();
   return libc_ioctl(fd, request, argp);
}

static int
munmap_init_helper(void *addr, size_t length)
{
   init();
   for (uint32_t i = 0; i < MAX_FD_COUNT * MAX_BO_COUNT; i++) {
      struct bo *bo = &bos[i];
      if (bo->map == addr) {
         bo->user_mapped = false;
         break;
      }
   }
   return libc_munmap(addr, length);
}

static void __attribute__ ((destructor))
fini(void)
{
   if (devinfo.ver != 0) {
      free(output_filename);
      if (!capture_finished)
         aub_file_finish(&aub_file);
      free(bos);
   }
}