/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
#include "common/intel_defines.h"
#include "common/intel_gem.h"

/**
 * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
 *
 * Returns the GEM handle, or 0 on failure. GEM handles are never 0.
 */
uint32_t
anv_gem_create(struct anv_device *device, uint64_t size)
{
   struct drm_i915_gem_create gem_create = {
      .size = size,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
   if (ret != 0) {
      /* FIXME: What do we do if this fails? */
      return 0;
   }

   return gem_create.handle;
}

void
anv_gem_close(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_gem_close close = {
      .handle = gem_handle,
   };

   intel_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
}

uint32_t
anv_gem_create_regions(struct anv_device *device, uint64_t anv_bo_size,
                       uint32_t flags, uint32_t num_regions,
                       struct drm_i915_gem_memory_class_instance *regions)
{
   /* Check for invalid flags */
   assert((flags & ~I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) == 0);

   struct drm_i915_gem_create_ext_memory_regions ext_regions = {
      .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
      .num_regions = num_regions,
      .regions = (uintptr_t)regions,
   };

   struct drm_i915_gem_create_ext gem_create = {
      .size = anv_bo_size,
      .extensions = (uintptr_t) &ext_regions,
      .flags = flags,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE_EXT,
                         &gem_create);
   if (ret != 0)
      return 0;

   return gem_create.handle;
}
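
/* Illustrative sketch (not part of the driver): allocating a BO that prefers
 * device-local memory but may fall back to system memory. The region
 * instances below are hypothetical; real values come from the
 * DRM_I915_QUERY_MEMORY_REGIONS query.
 *
 *    struct drm_i915_gem_memory_class_instance regions[2] = {
 *       { .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
 *       { .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
 *    };
 *    uint32_t handle = anv_gem_create_regions(device, size, 0, 2, regions);
 *    if (handle == 0)
 *       return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
 */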

/**
 * Wrapper around DRM_IOCTL_I915_GEM_MMAP_OFFSET. Returns MAP_FAILED on error.
 */
static void*
anv_gem_mmap_offset(struct anv_device *device, uint32_t gem_handle,
                    uint64_t offset, uint64_t size, uint32_t flags)
{
   struct drm_i915_gem_mmap_offset gem_mmap = {
      .handle = gem_handle,
      .flags = device->info.has_local_mem ? I915_MMAP_OFFSET_FIXED :
               (flags & I915_MMAP_WC) ? I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
   };
   assert(offset == 0);

   /* Get the fake offset back */
   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   /* And map it */
   void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    device->fd, gem_mmap.offset);
   return map;
}

static void*
anv_gem_mmap_legacy(struct anv_device *device, uint32_t gem_handle,
                    uint64_t offset, uint64_t size, uint32_t flags)
{
   assert(!device->info.has_local_mem);

   struct drm_i915_gem_mmap gem_mmap = {
      .handle = gem_handle,
      .offset = offset,
      .size = size,
      .flags = flags,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   return (void *)(uintptr_t) gem_mmap.addr_ptr;
}

/**
 * Maps a GEM BO via either the mmap_offset or the legacy mmap path.
 * Returns MAP_FAILED on error.
 */
void*
anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
             uint64_t offset, uint64_t size, uint32_t flags)
{
   void *map;
   if (device->physical->has_mmap_offset)
      map = anv_gem_mmap_offset(device, gem_handle, offset, size, flags);
   else
      map = anv_gem_mmap_legacy(device, gem_handle, offset, size, flags);

   if (map != MAP_FAILED)
      VG(VALGRIND_MALLOCLIKE_BLOCK(map, size, 0, 1));

   return map;
}

/* This is just a wrapper around munmap, but it also notifies valgrind that
 * this map is no longer valid. Pair this with anv_gem_mmap().
 */
void
anv_gem_munmap(struct anv_device *device, void *p, uint64_t size)
{
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}

uint32_t
anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
{
   struct drm_i915_gem_userptr userptr = {
      .user_ptr = (__u64)((unsigned long) mem),
      .user_size = size,
      .flags = 0,
   };

   if (device->physical->has_userptr_probe)
      userptr.flags |= I915_USERPTR_PROBE;

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
   if (ret == -1)
      return 0;

   return userptr.handle;
}

int
anv_gem_set_caching(struct anv_device *device,
                    uint32_t gem_handle, uint32_t caching)
{
   struct drm_i915_gem_caching gem_caching = {
      .handle = gem_handle,
      .caching = caching,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
}

int
anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                   uint32_t read_domains, uint32_t write_domain)
{
   struct drm_i915_gem_set_domain gem_set_domain = {
      .handle = gem_handle,
      .read_domains = read_domains,
      .write_domain = write_domain,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
}

/**
 * Returns 1 if the BO is busy, 0 if it is idle, or a negative value on error.
 */
int
anv_gem_busy(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_busy busy = {
      .handle = gem_handle,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret < 0)
      return ret;

   return busy.busy != 0;
}
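
/* Usage sketch (illustrative only): the mmap/munmap wrappers pair up. Note
 * that with device-local memory the kernel mandates I915_MMAP_OFFSET_FIXED;
 * otherwise the I915_MMAP_WC flag selects a write-combined mapping over a
 * write-back one. A hypothetical round trip:
 *
 *    uint32_t handle = anv_gem_create(device, 4096);
 *    void *map = anv_gem_mmap(device, handle, 0, 4096, 0);
 *    if (map != MAP_FAILED) {
 *       memset(map, 0, 4096);
 *       anv_gem_munmap(device, map, 4096);
 *    }
 *    anv_gem_close(device, handle);
 */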

/**
 * Wrapper around DRM_IOCTL_I915_GEM_WAIT.
 *
 * On error, \a timeout_ns holds the remaining time.
 */
int
anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
{
   struct drm_i915_gem_wait wait = {
      .bo_handle = gem_handle,
      .timeout_ns = *timeout_ns,
      .flags = 0,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   *timeout_ns = wait.timeout_ns;

   return ret;
}

int
anv_gem_execbuffer(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf)
{
   if (execbuf->flags & I915_EXEC_FENCE_OUT)
      return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
   else
      return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}

/** Returns -1 on error. */
int
anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_handle,
   };

   /* FIXME: On discrete platforms we don't have DRM_IOCTL_I915_GEM_GET_TILING
    * anymore, so we will need another way to get the tiling. Apparently this
    * is only used in Android code, so we may need some other way to
    * communicate the tiling mode.
    */
   if (intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      return -1;
   }

   return get_tiling.tiling_mode;
}

int
anv_gem_set_tiling(struct anv_device *device,
                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
{
   int ret;

   /* On discrete platforms we don't have DRM_IOCTL_I915_GEM_SET_TILING, so
    * nothing needs to be done.
    */
   if (!device->info.has_tiling_uapi)
      return 0;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code intel_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_handle,
         .tiling_mode = tiling,
         .stride = stride,
      };

      ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

int
anv_gem_get_param(int fd, uint32_t param)
{
   int tmp;

   drm_i915_getparam_t gp = {
      .param = param,
      .value = &tmp,
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
   if (ret == 0)
      return tmp;

   return 0;
}

bool
anv_gem_has_context_priority(int fd, int priority)
{
   return !anv_gem_set_context_param(fd, 0, I915_CONTEXT_PARAM_PRIORITY,
                                     priority);
}

int
anv_gem_create_context(struct anv_device *device)
{
   struct drm_i915_gem_context_create create = { 0 };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret == -1)
      return -1;

   return create.ctx_id;
}

int
anv_gem_destroy_context(struct anv_device *device, int context)
{
   struct drm_i915_gem_context_destroy destroy = {
      .ctx_id = context,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}

int
anv_gem_set_context_param(int fd, int context, uint32_t param, uint64_t value)
{
   struct drm_i915_gem_context_param p = {
      .ctx_id = context,
      .param = param,
      .value = value,
   };
   int err = 0;

   if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
      err = -errno;
   return err;
}
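
/* Sketch (illustrative): anv_gem_has_context_priority() probes whether the
 * kernel accepts I915_CONTEXT_PARAM_PRIORITY by setting it on the default
 * context (ctx_id 0). A hypothetical use, with 512 as an arbitrary value
 * inside i915's accepted [-1023, 1023] range; raising priority above the
 * default typically requires CAP_SYS_NICE:
 *
 *    if (anv_gem_has_context_priority(fd, 512)) {
 *       int ctx = anv_gem_create_context(device);
 *       if (ctx != -1)
 *          anv_gem_set_context_param(fd, ctx,
 *                                    I915_CONTEXT_PARAM_PRIORITY, 512);
 *    }
 */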

int
anv_gem_context_get_reset_stats(int fd, int context,
                                uint32_t *active, uint32_t *pending)
{
   struct drm_i915_reset_stats stats = {
      .ctx_id = context,
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
   if (ret == 0) {
      *active = stats.batch_active;
      *pending = stats.batch_pending;
   }

   return ret;
}

int
anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_prime_handle args = {
      .handle = gem_handle,
      .flags = DRM_CLOEXEC | DRM_RDWR,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
   if (ret == -1)
      return -1;

   return args.fd;
}

uint32_t
anv_gem_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_prime_handle args = {
      .fd = fd,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
   if (ret == -1)
      return 0;

   return args.handle;
}

int
anv_gem_reg_read(int fd, uint32_t offset, uint64_t *result)
{
   struct drm_i915_reg_read args = {
      .offset = offset,
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_REG_READ, &args);

   *result = args.val;
   return ret;
}

struct drm_i915_query_engine_info *
anv_gem_get_engine_info(int fd)
{
   return intel_i915_query_alloc(fd, DRM_I915_QUERY_ENGINE_INFO, NULL);
}
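
/* Sketch (illustrative): exporting a BO as a dma-buf and importing it back,
 * e.g. for sharing with another API. Importing a dma-buf that this same
 * device exported returns the original GEM handle rather than a new one, so
 * the handle must not be closed twice. Error handling elided; names are
 * hypothetical.
 *
 *    int dmabuf_fd = anv_gem_handle_to_fd(device, handle);
 *    uint32_t imported = anv_gem_fd_to_handle(device, dmabuf_fd);
 *    close(dmabuf_fd);
 */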