/*
 * Copyright © 2015-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/*
 * Out-of-bounds GPU write detector for i915: interposes open()/close()/
 * fcntl()/ioctl(), grows every GEM buffer created through it by one extra
 * page filled with a pseudo-random pattern, and re-checks that pattern
 * after each execbuffer completes.
 */

#undef _FILE_OFFSET_BITS /* prevent #define open open64 */

#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/sysmacros.h>
#include <dlfcn.h>
#include <pthread.h>
#include "drm-uapi/i915_drm.h"

#include "util/hash_table.h"
#include "util/u_math.h"

#define MESA_LOG_TAG "INTEL-SANITIZE-GPU"
#include "util/log.h"
#include "common/intel_clflush.h"

static int (*libc_open)(const char *pathname, int flags, mode_t mode);
static int (*libc_close)(int fd);
static int (*libc_ioctl)(int fd, unsigned long request, void *argp);
static int (*libc_fcntl)(int fd, int cmd, int param);

#define DRM_MAJOR 226

/* TODO: we want to make sure that the padding forces
 * the BO to take another page on the (PP)GTT; 4KB
 * may or may not be the page size for the BO. Indeed,
 * depending on GPU, kernel version and GEM size, the
 * page size can be one of 4KB, 64KB or 2MB.
 */
#define PADDING_SIZE 4096

struct refcnt_hash_table {
   struct hash_table *t;
   int refcnt;
};

pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
#define MUTEX_LOCK() do {                        \
   if (unlikely(pthread_mutex_lock(&mutex))) {   \
      mesa_loge("mutex_lock failed");            \
      abort();                                   \
   }                                             \
} while (0)
#define MUTEX_UNLOCK() do {                      \
   if (unlikely(pthread_mutex_unlock(&mutex))) { \
      mesa_loge("mutex_unlock failed");          \
      abort();                                   \
   }                                             \
} while (0)

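/* Maps a DRM fd to a refcounted table of GEM handle -> requested size, so
 * that duplicated fds (see dup_drm_fd()) share the same bo bookkeeping.
 */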
static struct hash_table *fds_to_bo_sizes = NULL;

static inline struct hash_table*
bo_size_table(int fd)
{
   struct hash_entry *e = _mesa_hash_table_search(fds_to_bo_sizes,
                                                  (void*)(uintptr_t)fd);
   return e ? ((struct refcnt_hash_table*)e->data)->t : NULL;
}

static inline uint64_t
bo_size(int fd, uint32_t handle)
{
   struct hash_table *t = bo_size_table(fd);
   if (!t)
      return UINT64_MAX;
   struct hash_entry *e = _mesa_hash_table_search(t, (void*)(uintptr_t)handle);
   return e ? (uint64_t)(uintptr_t)e->data : UINT64_MAX;
}

static inline bool
is_drm_fd(int fd)
{
   return !!bo_size_table(fd);
}

static inline void
add_drm_fd(int fd)
{
   struct refcnt_hash_table *r = malloc(sizeof(*r));
   r->refcnt = 1;
   r->t = _mesa_pointer_hash_table_create(NULL);
   _mesa_hash_table_insert(fds_to_bo_sizes, (void*)(uintptr_t)fd,
                           (void*)(uintptr_t)r);
}

static inline void
dup_drm_fd(int old_fd, int new_fd)
{
   struct hash_entry *e = _mesa_hash_table_search(fds_to_bo_sizes,
                                                  (void*)(uintptr_t)old_fd);
   struct refcnt_hash_table *r = e->data;
   r->refcnt++;
   _mesa_hash_table_insert(fds_to_bo_sizes, (void*)(uintptr_t)new_fd,
                           (void*)(uintptr_t)r);
}

static inline void
del_drm_fd(int fd)
{
   struct hash_entry *e = _mesa_hash_table_search(fds_to_bo_sizes,
                                                  (void*)(uintptr_t)fd);
   struct refcnt_hash_table *r = e->data;
   if (!--r->refcnt) {
      _mesa_hash_table_remove(fds_to_bo_sizes, e);
      _mesa_hash_table_destroy(r->t, NULL);
      free(r);
   }
}

/* Our goal is not to have noise good enough for crypto,
 * but instead values that are unique-ish enough that
 * it is incredibly unlikely that a buffer overwrite
 * will produce the exact same values.
 */
static uint8_t
next_noise_value(uint8_t prev_noise)
{
   uint32_t v = prev_noise;
   return (v * 103u + 227u) & 0xFF;
}

static void
fill_noise_buffer(uint8_t *dst, uint8_t start, uint32_t length)
{
   for (uint32_t i = 0; i < length; ++i) {
      dst[i] = start;
      start = next_noise_value(start);
   }
}

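/* Checks that the extra padding page appended by create_with_padding() still
 * holds the noise sequence seeded with (handle & 0xFF).  The padding starts
 * at the page-aligned end of the buffer's requested size.
 */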
static bool
padding_is_good(int fd, uint32_t handle)
{
   uint64_t size = bo_size(fd, handle);

   /* Unknown bo, maybe prime or userptr. Ignore */
   if (size == UINT64_MAX)
      return true;

   struct drm_i915_gem_mmap mmap_arg = {
      .handle = handle,
      .offset = align64(size, 4096),
      .size = PADDING_SIZE,
      .flags = 0,
   };

   uint8_t *mapped;
   int ret;
   uint8_t expected_value;

   ret = libc_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
   if (ret != 0) {
      mesa_logd("Unable to map buffer %d for pad checking.", handle);
      return false;
   }

   mapped = (uint8_t*) (uintptr_t) mmap_arg.addr_ptr;
   /* bah-humbug, we need to see the latest contents and
    * if the bo is not cache coherent we likely need to
    * invalidate the cache lines to get it.
    */
   intel_invalidate_range(mapped, PADDING_SIZE);

   expected_value = handle & 0xFF;
   for (uint32_t i = 0; i < PADDING_SIZE; ++i) {
      if (expected_value != mapped[i]) {
         munmap(mapped, PADDING_SIZE);
         return false;
      }
      expected_value = next_noise_value(expected_value);
   }
   munmap(mapped, PADDING_SIZE);

   return true;
}

static int
create_with_padding(int fd, struct drm_i915_gem_create *create)
{
   uint64_t original_size = create->size;

   create->size = align64(original_size, 4096) + PADDING_SIZE;
   int ret = libc_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, create);
   create->size = original_size;

   if (ret != 0)
      return ret;

   uint8_t *noise_values;
   struct drm_i915_gem_mmap mmap_arg = {
      .handle = create->handle,
      .offset = align64(create->size, 4096),
      .size = PADDING_SIZE,
      .flags = 0,
   };

   ret = libc_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
   if (ret != 0) {
      mesa_logd("Unable to map buffer %d for pad creation.", create->handle);
      return 0;
   }

   noise_values = (uint8_t*) (uintptr_t) mmap_arg.addr_ptr;
   fill_noise_buffer(noise_values, create->handle & 0xFF,
                     PADDING_SIZE);
   munmap(noise_values, PADDING_SIZE);

   _mesa_hash_table_insert(bo_size_table(fd), (void*)(uintptr_t)create->handle,
                           (void*)(uintptr_t)create->size);

   return 0;
}

static int
exec_and_check_padding(int fd, unsigned long request,
                       struct drm_i915_gem_execbuffer2 *exec)
{
   int ret = libc_ioctl(fd, request, exec);
   if (ret != 0)
      return ret;

   struct drm_i915_gem_exec_object2 *objects =
      (void*)(uintptr_t)exec->buffers_ptr;
   uint32_t batch_bo = exec->flags & I915_EXEC_BATCH_FIRST ? objects[0].handle :
                       objects[exec->buffer_count - 1].handle;

   /* Wait for the batch to complete before inspecting the padding pages. */
   struct drm_i915_gem_wait wait = {
      .bo_handle = batch_bo,
      .timeout_ns = -1,
   };
   ret = libc_ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   if (ret != 0)
      return ret;

   bool detected_out_of_bounds_write = false;

   for (int i = 0; i < exec->buffer_count; i++) {
      uint32_t handle = objects[i].handle;

      if (!padding_is_good(fd, handle)) {
         detected_out_of_bounds_write = true;
         mesa_loge("Detected buffer out-of-bounds write in bo %d", handle);
      }
   }

   if (unlikely(detected_out_of_bounds_write)) {
      abort();
   }

   return 0;
}

static int
gem_close(int fd, struct drm_gem_close *close)
{
   int ret = libc_ioctl(fd, DRM_IOCTL_GEM_CLOSE, close);
   if (ret != 0)
      return ret;

   struct hash_table *t = bo_size_table(fd);
   struct hash_entry *e =
      _mesa_hash_table_search(t, (void*)(uintptr_t)close->handle);

   if (e)
      _mesa_hash_table_remove(t, e);

   return 0;
}

static bool
is_i915(int fd)
{
   struct stat stat;
   if (fstat(fd, &stat))
      return false;

   if (!S_ISCHR(stat.st_mode) || major(stat.st_rdev) != DRM_MAJOR)
      return false;

   char name[5] = "";
   struct drm_version version = {
      .name = name,
      .name_len = sizeof(name) - 1,
   };
   if (libc_ioctl(fd, DRM_IOCTL_VERSION, &version))
      return false;

   return strcmp("i915", name) == 0;
}

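/* The exported wrappers below interpose the libc entry points so that, when
 * this library is preloaded, every DRM fd the application opens, duplicates
 * or closes is tracked in fds_to_bo_sizes and its i915 ioctls can be
 * intercepted.
 */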
__attribute__ ((visibility ("default"))) int
open(const char *path, int flags, ...)
{
   va_list args;
   mode_t mode;

   va_start(args, flags);
   mode = va_arg(args, int);
   va_end(args);

   int fd = libc_open(path, flags, mode);

   MUTEX_LOCK();

   if (fd >= 0 && is_i915(fd))
      add_drm_fd(fd);

   MUTEX_UNLOCK();

   return fd;
}

__attribute__ ((visibility ("default"), alias ("open"))) int
open64(const char *path, int flags, ...);

__attribute__ ((visibility ("default"))) int
close(int fd)
{
   MUTEX_LOCK();

   if (is_drm_fd(fd))
      del_drm_fd(fd);

   MUTEX_UNLOCK();

   return libc_close(fd);
}

__attribute__ ((visibility ("default"))) int
fcntl(int fd, int cmd, ...)
{
   va_list args;
   int param;

   va_start(args, cmd);
   param = va_arg(args, int);
   va_end(args);

   int res = libc_fcntl(fd, cmd, param);

   MUTEX_LOCK();

   if (res >= 0 && is_drm_fd(fd) && cmd == F_DUPFD_CLOEXEC)
      dup_drm_fd(fd, res);

   MUTEX_UNLOCK();

   return res;
}

__attribute__ ((visibility ("default"))) int
ioctl(int fd, unsigned long request, ...)
{
   int res;
   va_list args;
   void *argp;

   MUTEX_LOCK();

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   if (_IOC_TYPE(request) == DRM_IOCTL_BASE && !is_drm_fd(fd) && is_i915(fd)) {
      mesa_loge("missed drm fd %d", fd);
      add_drm_fd(fd);
   }

   if (is_drm_fd(fd)) {
      switch (request) {
      case DRM_IOCTL_GEM_CLOSE:
         res = gem_close(fd, (struct drm_gem_close*)argp);
         goto out;

      case DRM_IOCTL_I915_GEM_CREATE:
         res = create_with_padding(fd, (struct drm_i915_gem_create*)argp);
         goto out;

      case DRM_IOCTL_I915_GEM_EXECBUFFER2:
      case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR:
         res = exec_and_check_padding(fd, request,
                                      (struct drm_i915_gem_execbuffer2*)argp);
         goto out;

      default:
         break;
      }
   }

   res = libc_ioctl(fd, request, argp);

 out:
   MUTEX_UNLOCK();
   return res;
}

static void __attribute__ ((constructor))
init(void)
{
   fds_to_bo_sizes = _mesa_pointer_hash_table_create(NULL);
   libc_open = dlsym(RTLD_NEXT, "open");
   libc_close = dlsym(RTLD_NEXT, "close");
   libc_fcntl = dlsym(RTLD_NEXT, "fcntl");
   libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
}
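
/*
 * Usage sketch (the library and application names below are only examples,
 * not defined by this file): build this translation unit as a shared library
 * and preload it into an i915 client, e.g.
 *
 *    LD_PRELOAD=/path/to/libintel_sanitize_gpu.so ./app
 *
 * Out-of-bounds GPU writes are then reported via mesa_loge() and the process
 * aborts once the offending execbuffer has completed.
 */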