Lines Matching defs:bufmgr

190     * Link in the global list of bufmgrs.
284 bucket_info_for_heap(struct iris_bufmgr *bufmgr, enum iris_heap heap,
289 *cache_bucket = bufmgr->cache_bucket;
290 *num_buckets = &bufmgr->num_buckets;
293 *cache_bucket = bufmgr->local_cache_bucket;
294 *num_buckets = &bufmgr->num_local_buckets;
297 *cache_bucket = bufmgr->local_preferred_cache_bucket;
298 *num_buckets = &bufmgr->num_local_preferred_buckets;
315 bucket_for_size(struct iris_bufmgr *bufmgr, uint64_t size,
348 bucket_info_for_heap(bufmgr, heap, &buckets, &num_buckets);
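
Note: the bucket_info_for_heap() matches at 284-298 pair each iris_heap with its own bucket array and counter. A minimal reassembly of those lines follows; the switch shape and the int type of the counters are assumptions:

   static void
   bucket_info_for_heap(struct iris_bufmgr *bufmgr, enum iris_heap heap,
                        struct bo_cache_bucket **cache_bucket, int **num_buckets)
   {
      switch (heap) {
      case IRIS_HEAP_SYSTEM_MEMORY:                /* lines 289-290 */
         *cache_bucket = bufmgr->cache_bucket;
         *num_buckets = &bufmgr->num_buckets;
         break;
      case IRIS_HEAP_DEVICE_LOCAL:                 /* lines 293-294 */
         *cache_bucket = bufmgr->local_cache_bucket;
         *num_buckets = &bufmgr->num_local_buckets;
         break;
      case IRIS_HEAP_DEVICE_LOCAL_PREFERRED:       /* lines 297-298 */
         *cache_bucket = bufmgr->local_preferred_cache_bucket;
         *num_buckets = &bufmgr->num_local_preferred_buckets;
         break;
      }
   }
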
391 vma_alloc(struct iris_bufmgr *bufmgr,
396 simple_mtx_assert_locked(&bufmgr->lock);
400 alignment = MAX2(alignment, bufmgr->vma_min_align);
406 util_vma_heap_alloc(&bufmgr->vma_allocator[memzone], size, alignment);
415 vma_free(struct iris_bufmgr *bufmgr,
419 simple_mtx_assert_locked(&bufmgr->lock);
432 assert(memzone < ARRAY_SIZE(bufmgr->vma_allocator));
434 util_vma_heap_free(&bufmgr->vma_allocator[memzone], address, size);
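
Note: vma_alloc()/vma_free() (391-434) hand out GPU virtual addresses from one util_vma_heap per memory zone, under bufmgr->lock. A minimal sketch of the alloc side, assuming this enum name and no zone-specific special cases:

   static uint64_t
   vma_alloc(struct iris_bufmgr *bufmgr, enum iris_memory_zone memzone,
             uint64_t size, uint64_t alignment)
   {
      simple_mtx_assert_locked(&bufmgr->lock);              /* line 396 */
      alignment = MAX2(alignment, bufmgr->vma_min_align);   /* line 400 */
      return util_vma_heap_alloc(&bufmgr->vma_allocator[memzone],
                                 size, alignment);          /* line 406 */
   }
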
442 struct iris_bufmgr *bufmgr = bo->bufmgr;
445 int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
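
Note: 442-445 are the busy check, a single DRM_IOCTL_I915_GEM_BUSY round trip. A plausible reading (the function name and return handling beyond these two lines are assumptions):

   static bool
   iris_bo_busy(struct iris_bo *bo)
   {
      struct iris_bufmgr *bufmgr = bo->bufmgr;                           /* 442 */
      struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };
      int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy); /* 445 */
      return ret == 0 && busy.busy;
   }
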
457 struct iris_bufmgr *bufmgr = bo->bufmgr;
463 simple_mtx_lock(&bufmgr->bo_deps_lock);
494 ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
503 iris_syncobj_reference(bufmgr, &bo->deps[d].write_syncobjs[b], NULL);
504 iris_syncobj_reference(bufmgr, &bo->deps[d].read_syncobjs[b], NULL);
509 simple_mtx_unlock(&bufmgr->bo_deps_lock);
545 intel_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
575 get_slabs(struct iris_bufmgr *bufmgr, uint64_t size)
578 struct pb_slabs *slabs = &bufmgr->bo_slabs[i];
589 get_slab_pot_entry_size(struct iris_bufmgr *bufmgr, unsigned size)
592 unsigned min_entry_size = 1 << bufmgr->bo_slabs[0].min_order;
599 get_slab_entry_alignment(struct iris_bufmgr *bufmgr, unsigned size)
601 unsigned entry_size = get_slab_pot_entry_size(bufmgr, size);
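
Note: the slab helpers (575-601) size suballocated entries: round the request up to a power of two, floored at the smallest configured slab order. A sketch; the util_next_power_of_two() rounding and the quarter-alignment rule are assumptions:

   static unsigned
   get_slab_pot_entry_size(struct iris_bufmgr *bufmgr, unsigned size)
   {
      unsigned entry_size = util_next_power_of_two(size);
      unsigned min_entry_size = 1 << bufmgr->bo_slabs[0].min_order;  /* 592 */
      return MAX2(entry_size, min_entry_size);
   }

   static unsigned
   get_slab_entry_alignment(struct iris_bufmgr *bufmgr, unsigned size)
   {
      unsigned entry_size = get_slab_pot_entry_size(bufmgr, size);   /* 601 */
      /* Assumed: a request well short of the entry size settles for
       * quarter-entry alignment. */
      if (size <= entry_size * 3 / 4)
         return entry_size / 4;
      return entry_size;
   }
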
620 struct iris_bufmgr *bufmgr = priv;
622 struct intel_aux_map_context *aux_map_ctx = bufmgr->aux_map_ctx;
641 iris_syncobj_reference(bufmgr, &bo->deps[d].write_syncobjs[b], NULL);
642 iris_syncobj_reference(bufmgr, &bo->deps[d].read_syncobjs[b], NULL);
660 struct iris_bufmgr *bufmgr = priv;
671 struct pb_slabs *slabs = bufmgr->bo_slabs;
716 iris_bo_alloc(bufmgr, "slab", slab_size, slab_size, memzone, flags);
735 bo->bufmgr = bufmgr;
763 flags_to_heap(struct iris_bufmgr *bufmgr, unsigned flags)
765 if (bufmgr->vram.size > 0 &&
777 alloc_bo_from_slabs(struct iris_bufmgr *bufmgr,
786 struct pb_slabs *last_slab = &bufmgr->bo_slabs[NUM_SLAB_ALLOCATORS - 1];
795 enum iris_heap heap = flags_to_heap(bufmgr, flags);
805 if (alignment > get_slab_entry_alignment(bufmgr, alloc_size)) {
809 unsigned pot_size = get_slab_pot_entry_size(bufmgr, alloc_size);
820 struct pb_slabs *slabs = get_slabs(bufmgr, alloc_size);
833 if (bo->aux_map_address && bo->bufmgr->aux_map_ctx) {
840 intel_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->address,
866 alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
879 simple_mtx_assert_locked(&bufmgr->lock);
924 if (bo->bufmgr->aux_map_ctx)
925 intel_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->address,
935 vma_free(bufmgr, bo->address, bo->size);
956 alloc_fresh_bo(struct iris_bufmgr *bufmgr, uint64_t bo_size, unsigned flags)
962 bo->real.heap = flags_to_heap(bufmgr, flags);
967 if (bufmgr->vram.size > 0) {
976 regions[nregions++] = bufmgr->vram.region;
977 regions[nregions++] = bufmgr->sys.region;
980 regions[nregions++] = bufmgr->vram.region;
983 regions[nregions++] = bufmgr->sys.region;
1000 if (!bufmgr->all_vram_mappable &&
1009 if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create) != 0) {
1020 if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0) {
1027 bo->bufmgr = bufmgr;
1031 if (bufmgr->vram.size == 0) {
1042 intel_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
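
Note: alloc_fresh_bo() (956-1042) builds an i915 memory-region list before creating the GEM object; 976-983 show vram-then-system for one placement and single-region lists for the others. Reassembled as a hypothetical helper, with the heap-to-case mapping assumed:

   static uint16_t
   fill_memory_regions(struct iris_bufmgr *bufmgr, enum iris_heap heap,
                       struct drm_i915_gem_memory_class_instance *regions)
   {
      uint16_t nregions = 0;

      switch (heap) {
      case IRIS_HEAP_DEVICE_LOCAL_PREFERRED:
         /* Prefer VRAM, but let the kernel fall back to system RAM. */
         regions[nregions++] = bufmgr->vram.region;   /* line 976 */
         regions[nregions++] = bufmgr->sys.region;    /* line 977 */
         break;
      case IRIS_HEAP_DEVICE_LOCAL:
         regions[nregions++] = bufmgr->vram.region;   /* line 980 */
         break;
      default:
         regions[nregions++] = bufmgr->sys.region;    /* line 983 */
         break;
      }
      return nregions;
   }
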
1056 iris_bo_alloc(struct iris_bufmgr *bufmgr,
1065 enum iris_heap heap = flags_to_heap(bufmgr, flags);
1067 struct bo_cache_bucket *bucket = bucket_for_size(bufmgr, size, heap);
1072 bo = alloc_bo_from_slabs(bufmgr, name, size, alignment, flags);
1083 bool is_coherent = bufmgr->has_llc ||
1084 (bufmgr->vram.size > 0 && !local) ||
1089 if (!bufmgr->all_vram_mappable && heap == IRIS_HEAP_DEVICE_LOCAL)
1096 simple_mtx_lock(&bufmgr->lock);
1101 bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, mmap_mode,
1106 bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, mmap_mode,
1110 simple_mtx_unlock(&bufmgr->lock);
1113 bo = alloc_fresh_bo(bufmgr, bo_size, flags);
1119 simple_mtx_lock(&bufmgr->lock);
1120 bo->address = vma_alloc(bufmgr, memzone, bo->size, alignment);
1121 simple_mtx_unlock(&bufmgr->lock);
1129 bo->real.reusable = bucket && bufmgr->bo_reuse;
1146 !bufmgr->has_llc && bufmgr->vram.size == 0) {
1151 if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) != 0)
1164 simple_mtx_lock(&bufmgr->lock);
1166 simple_mtx_unlock(&bufmgr->lock);
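
Note: iris_bo_alloc() (1056-1166) tries three sources in order: the slab suballocator, the per-heap bucket cache (two passes under bufmgr->lock), then a fresh GEM allocation plus a VMA assignment. Simplified control flow; the final bool distinguishing the two cache passes is an assumption:

   bo = alloc_bo_from_slabs(bufmgr, name, size, alignment, flags);    /* 1072 */

   if (!bo && bucket) {
      simple_mtx_lock(&bufmgr->lock);
      /* First pass insists on a matching memory zone (1101); the
       * second relaxes that (1106). */
      bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone,
                               mmap_mode, flags, true);
      if (!bo)
         bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone,
                                  mmap_mode, flags, false);
      simple_mtx_unlock(&bufmgr->lock);
   }

   if (!bo) {
      bo = alloc_fresh_bo(bufmgr, bo_size, flags);                    /* 1113 */
      simple_mtx_lock(&bufmgr->lock);
      bo->address = vma_alloc(bufmgr, memzone, bo->size, alignment);  /* 1120 */
      simple_mtx_unlock(&bufmgr->lock);
   }
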
1171 iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
1185 .flags = bufmgr->has_userptr_probe ? I915_USERPTR_PROBE : 0,
1187 if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
1191 if (!bufmgr->has_userptr_probe) {
1197 if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
1205 bo->bufmgr = bufmgr;
1208 simple_mtx_lock(&bufmgr->lock);
1209 bo->address = vma_alloc(bufmgr, memzone, size, 1);
1210 simple_mtx_unlock(&bufmgr->lock);
1225 intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
1238 iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
1249 simple_mtx_lock(&bufmgr->lock);
1250 bo = find_and_ref_external_bo(bufmgr->name_table, handle);
1255 int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
1266 bo = find_and_ref_external_bo(bufmgr->handle_table, open_arg.handle);
1273 intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
1280 bo->bufmgr = bufmgr;
1288 bo->address = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);
1296 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
1297 _mesa_hash_table_insert(bufmgr->name_table, &bo->real.global_name, bo);
1302 simple_mtx_unlock(&bufmgr->lock);
1309 struct iris_bufmgr *bufmgr = bo->bufmgr;
1311 simple_mtx_assert_locked(&bufmgr->lock);
1318 entry = _mesa_hash_table_search(bufmgr->name_table,
1320 _mesa_hash_table_remove(bufmgr->name_table, entry);
1323 entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
1324 _mesa_hash_table_remove(bufmgr->handle_table, entry);
1339 int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
1345 if (bo->aux_map_address && bo->bufmgr->aux_map_ctx) {
1346 intel_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->address,
1351 vma_free(bo->bufmgr, bo->address, bo->size);
1355 iris_syncobj_reference(bufmgr, &bo->deps[d].write_syncobjs[b], NULL);
1356 iris_syncobj_reference(bufmgr, &bo->deps[d].read_syncobjs[b], NULL);
1367 struct iris_bufmgr *bufmgr = bo->bufmgr;
1369 simple_mtx_assert_locked(&bufmgr->lock);
1381 list_addtail(&bo->head, &bufmgr->zombie_list);
1387 cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time)
1391 simple_mtx_assert_locked(&bufmgr->lock);
1393 if (bufmgr->time == time)
1396 for (i = 0; i < bufmgr->num_buckets; i++) {
1397 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
1409 for (i = 0; i < bufmgr->num_local_buckets; i++) {
1410 struct bo_cache_bucket *bucket = &bufmgr->local_cache_bucket[i];
1422 for (i = 0; i < bufmgr->num_local_preferred_buckets; i++) {
1423 struct bo_cache_bucket *bucket = &bufmgr->local_preferred_cache_bucket[i];
1435 list_for_each_entry_safe(struct iris_bo, bo, &bufmgr->zombie_list, head) {
1446 bufmgr->time = time;
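
Note: cleanup_bo_cache() (1387-1446) trims all three bucket arrays and the zombie list, and stamps bufmgr->time so at most one sweep runs per second (1393). A per-bucket helper in the same spirit; the retention window, the bucket's list head, free_time, and bo_free() are all assumptions:

   static void
   cleanup_bucket(struct bo_cache_bucket *bucket, time_t time)
   {
      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         /* Entries are assumed oldest-first: stop at the first BO
          * freed inside the retention window. */
         if (time - bo->real.free_time <= 1)
            break;
         list_del(&bo->head);
         bo_free(bo);
      }
   }
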
1452 struct iris_bufmgr *bufmgr = bo->bufmgr;
1461 bucket = bucket_for_size(bufmgr, bo->size, bo->real.heap);
1482 struct iris_bufmgr *bufmgr = bo->bufmgr;
1488 pb_slab_free(get_slabs(bufmgr, bo->size), &bo->slab.entry);
1490 simple_mtx_lock(&bufmgr->lock);
1494 cleanup_bo_cache(bufmgr, time.tv_sec);
1497 simple_mtx_unlock(&bufmgr->lock);
1542 struct iris_bufmgr *bufmgr = bo->bufmgr;
1544 assert(bufmgr->vram.size == 0);
1555 int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
1569 struct iris_bufmgr *bufmgr = bo->bufmgr;
1577 if (bufmgr->has_local_mem) {
1607 int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmap_arg);
1616 bufmgr->fd, mmap_arg.offset);
1630 struct iris_bufmgr *bufmgr = bo->bufmgr;
1644 map = bufmgr->has_mmap_offset ? iris_bo_gem_mmap_offset(dbg, bo)
1687 struct iris_bufmgr *bufmgr = bo->bufmgr;
1693 int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
1746 iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
1748 iris_destroy_border_color_pool(&bufmgr->border_color_pool);
1751 intel_aux_map_finish(bufmgr->aux_map_ctx);
1753 /* bufmgr will no longer try to free VMA entries in the aux-map */
1754 bufmgr->aux_map_ctx = NULL;
1757 if (bufmgr->bo_slabs[i].groups)
1758 pb_slabs_deinit(&bufmgr->bo_slabs[i]);
1761 simple_mtx_lock(&bufmgr->lock);
1763 for (int i = 0; i < bufmgr->num_buckets; i++) {
1764 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
1773 for (int i = 0; i < bufmgr->num_local_buckets; i++) {
1774 struct bo_cache_bucket *bucket = &bufmgr->local_cache_bucket[i];
1783 for (int i = 0; i < bufmgr->num_local_preferred_buckets; i++) {
1784 struct bo_cache_bucket *bucket = &bufmgr->local_preferred_cache_bucket[i];
1794 list_for_each_entry_safe(struct iris_bo, bo, &bufmgr->zombie_list, head) {
1799 _mesa_hash_table_destroy(bufmgr->name_table, NULL);
1800 _mesa_hash_table_destroy(bufmgr->handle_table, NULL);
1803 util_vma_heap_finish(&bufmgr->vma_allocator[z]);
1805 close(bufmgr->fd);
1807 simple_mtx_unlock(&bufmgr->lock);
1809 simple_mtx_destroy(&bufmgr->lock);
1810 simple_mtx_destroy(&bufmgr->bo_deps_lock);
1812 free(bufmgr);
1818 struct iris_bufmgr *bufmgr = bo->bufmgr;
1820 if (!bufmgr->has_tiling_uapi) {
1826 int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &ti);
1841 struct iris_bufmgr *bufmgr = bo->bufmgr;
1848 if (!bufmgr->has_tiling_uapi)
1860 ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
1872 iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd)
1877 simple_mtx_lock(&bufmgr->lock);
1878 int ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
1882 simple_mtx_unlock(&bufmgr->lock);
1891 bo = find_and_ref_external_bo(bufmgr->handle_table, handle);
1910 bo->bufmgr = bufmgr;
1928 bo->address = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 64 * 1024);
1936 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
1939 simple_mtx_unlock(&bufmgr->lock);
1946 struct iris_bufmgr *bufmgr = bo->bufmgr;
1950 simple_mtx_assert_locked(&bufmgr->lock);
1953 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
1968 struct iris_bufmgr *bufmgr = bo->bufmgr;
1978 simple_mtx_lock(&bufmgr->lock);
1980 simple_mtx_unlock(&bufmgr->lock);
1986 struct iris_bufmgr *bufmgr = bo->bufmgr;
1993 if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
2014 struct iris_bufmgr *bufmgr = bo->bufmgr;
2022 if (intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
2025 simple_mtx_lock(&bufmgr->lock);
2029 _mesa_hash_table_insert(bufmgr->name_table, &bo->real.global_name, bo);
2031 simple_mtx_unlock(&bufmgr->lock);
2049 struct iris_bufmgr *bufmgr = bo->bufmgr;
2050 int ret = os_same_file_description(drm_fd, bufmgr->fd);
2072 simple_mtx_lock(&bufmgr->lock);
2076 simple_mtx_unlock(&bufmgr->lock);
2097 simple_mtx_unlock(&bufmgr->lock);
2105 add_bucket(struct iris_bufmgr *bufmgr, int size, enum iris_heap heap)
2109 bucket_info_for_heap(bufmgr, heap, &buckets, &num_buckets);
2116 assert(bucket_for_size(bufmgr, size, heap) == &buckets[i]);
2117 assert(bucket_for_size(bufmgr, size - 2048, heap) == &buckets[i]);
2118 assert(bucket_for_size(bufmgr, size + 1, heap) != &buckets[i]);
2122 init_cache_buckets(struct iris_bufmgr *bufmgr, enum iris_heap heap)
2134 add_bucket(bufmgr, PAGE_SIZE, heap);
2135 add_bucket(bufmgr, PAGE_SIZE * 2, heap);
2136 add_bucket(bufmgr, PAGE_SIZE * 3, heap);
2140 add_bucket(bufmgr, size, heap);
2142 add_bucket(bufmgr, size + size * 1 / 4, heap);
2143 add_bucket(bufmgr, size + size * 2 / 4, heap);
2144 add_bucket(bufmgr, size + size * 3 / 4, heap);
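
Note: init_cache_buckets() (2122-2144) seeds each heap with 1-, 2-, and 3-page buckets, then every power of two plus its 1/4, 2/4, and 3/4 intermediates, so a cached BO never overshoots a request by more than 25%. A standalone demo of that progression (the 64 MB cap is an assumption):

   #include <stdio.h>

   #define PAGE_SIZE 4096

   int main(void)
   {
      /* The three sub-power-of-two starter buckets (2134-2136). */
      printf("%d %d %d\n", PAGE_SIZE, PAGE_SIZE * 2, PAGE_SIZE * 3);

      /* Power-of-two buckets with quarter steps (2140-2144). */
      for (unsigned long size = 4 * PAGE_SIZE;
           size <= 64ul * 1024 * 1024; size *= 2) {
         printf("%lu %lu %lu %lu\n", size,
                size + size * 1 / 4,
                size + size * 2 / 4,
                size + size * 3 / 4);
      }
      return 0;
   }
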
2149 iris_hw_context_set_unrecoverable(struct iris_bufmgr *bufmgr,
2172 intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
2176 iris_hw_context_set_vm_id(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
2178 if (!bufmgr->use_global_vm)
2184 .value = bufmgr->global_vm_id,
2186 int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
2194 iris_create_hw_context(struct iris_bufmgr *bufmgr)
2197 int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
2203 iris_hw_context_set_unrecoverable(bufmgr, create.ctx_id);
2204 iris_hw_context_set_vm_id(bufmgr, create.ctx_id);
2210 iris_kernel_context_get_priority(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
2216 intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
2221 iris_hw_context_set_priority(struct iris_bufmgr *bufmgr,
2233 if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
2240 iris_clone_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
2242 uint32_t new_ctx = iris_create_hw_context(bufmgr);
2245 int priority = iris_kernel_context_get_priority(bufmgr, ctx_id);
2246 iris_hw_context_set_priority(bufmgr, new_ctx, priority);
2253 iris_destroy_kernel_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
2258 intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
2265 iris_reg_read(struct iris_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
2268 int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
2281 struct iris_bufmgr *bufmgr = (struct iris_bufmgr *)driver_ctx;
2286 struct iris_bo *bo = alloc_fresh_bo(bufmgr, size, 0);
2292 simple_mtx_lock(&bufmgr->lock);
2294 bo->address = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 64 * 1024);
2298 simple_mtx_unlock(&bufmgr->lock);
2302 simple_mtx_unlock(&bufmgr->lock);
2344 iris_bufmgr_get_meminfo(struct iris_bufmgr *bufmgr,
2347 bufmgr->sys.region.memory_class = devinfo->mem.sram.mem_class;
2348 bufmgr->sys.region.memory_instance = devinfo->mem.sram.mem_instance;
2349 bufmgr->sys.size = devinfo->mem.sram.mappable.size;
2351 bufmgr->vram.region.memory_class = devinfo->mem.vram.mem_class;
2352 bufmgr->vram.region.memory_instance = devinfo->mem.vram.mem_instance;
2353 bufmgr->vram.size = devinfo->mem.vram.mappable.size;
2359 iris_bufmgr_init_global_vm(int fd, struct iris_bufmgr *bufmgr)
2367 bufmgr->use_global_vm = false;
2368 bufmgr->global_vm_id = 0;
2370 bufmgr->use_global_vm = true;
2371 bufmgr->global_vm_id = gcp.value;
2387 struct iris_bufmgr *bufmgr = calloc(1, sizeof(*bufmgr));
2388 if (bufmgr == NULL)
2393 * multiple parties (threads sharing the same screen bufmgr, or
2397 * Don't do this! Ensure that each library/bufmgr has its own device
2400 bufmgr->fd = os_dupfd_cloexec(fd);
2402 p_atomic_set(&bufmgr->refcount, 1);
2404 simple_mtx_init(&bufmgr->lock, mtx_plain);
2405 simple_mtx_init(&bufmgr->bo_deps_lock, mtx_plain);
2407 iris_bufmgr_init_global_vm(fd, bufmgr);
2409 list_inithead(&bufmgr->zombie_list);
2411 bufmgr->has_llc = devinfo->has_llc;
2412 bufmgr->has_local_mem = devinfo->has_local_mem;
2413 bufmgr->has_tiling_uapi = devinfo->has_tiling_uapi;
2414 bufmgr->bo_reuse = bo_reuse;
2415 bufmgr->has_mmap_offset = gem_param(fd, I915_PARAM_MMAP_GTT_VERSION) >= 4;
2416 bufmgr->has_userptr_probe =
2418 iris_bufmgr_get_meminfo(bufmgr, devinfo);
2419 bufmgr->all_vram_mappable = intel_vram_all_mappable(devinfo);
2428 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SHADER],
2430 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_BINDER],
2432 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_BINDLESS],
2434 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SURFACE],
2449 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_DYNAMIC],
2456 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_OTHER],
2460 init_cache_buckets(bufmgr, IRIS_HEAP_SYSTEM_MEMORY);
2461 init_cache_buckets(bufmgr, IRIS_HEAP_DEVICE_LOCAL);
2462 init_cache_buckets(bufmgr, IRIS_HEAP_DEVICE_LOCAL_PREFERRED);
2475 if (!pb_slabs_init(&bufmgr->bo_slabs[i], min_order, max_order,
2476 IRIS_HEAP_MAX, true, bufmgr,
2480 free(bufmgr);
2486 bufmgr->name_table =
2488 bufmgr->handle_table =
2491 bufmgr->vma_min_align =
2496 bufmgr->aux_map_ctx = intel_aux_map_init(bufmgr, &aux_map_allocator,
2498 assert(bufmgr->aux_map_ctx);
2501 iris_init_border_color_pool(bufmgr, &bufmgr->border_color_pool);
2503 return bufmgr;
2507 iris_bufmgr_ref(struct iris_bufmgr *bufmgr)
2509 p_atomic_inc(&bufmgr->refcount);
2510 return bufmgr;
2514 iris_bufmgr_unref(struct iris_bufmgr *bufmgr)
2517 if (p_atomic_dec_zero(&bufmgr->refcount)) {
2518 list_del(&bufmgr->link);
2519 iris_bufmgr_destroy(bufmgr);
2526 iris_bufmgr_create_screen_id(struct iris_bufmgr *bufmgr)
2528 return p_atomic_inc_return(&bufmgr->next_screen_id) - 1;
2544 struct iris_bufmgr *bufmgr = NULL;
2554 bufmgr = iris_bufmgr_ref(iter_bufmgr);
2559 bufmgr = iris_bufmgr_create(devinfo, fd, bo_reuse);
2560 if (bufmgr)
2561 list_addtail(&bufmgr->link, &global_bufmgr_list);
2566 return bufmgr;
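
Note: the get-or-create path at 2544-2566 shares one refcounted bufmgr per open device: walk global_bufmgr_list for an entry whose fd aliases the caller's (os_same_file_description(), as on 2050), otherwise create and append one. Reassembled; the list mutex name and the enclosing function are not visible in the matches and are assumed:

   simple_mtx_lock(&global_bufmgr_list_mutex);

   list_for_each_entry(struct iris_bufmgr, iter_bufmgr,
                       &global_bufmgr_list, link) {
      /* os_same_file_description() returns 0 when both fds refer to
       * the same open file description. */
      if (os_same_file_description(iter_bufmgr->fd, fd) == 0) {
         bufmgr = iris_bufmgr_ref(iter_bufmgr);              /* 2554 */
         goto unlock;
      }
   }

   bufmgr = iris_bufmgr_create(devinfo, fd, bo_reuse);       /* 2559 */
   if (bufmgr)
      list_addtail(&bufmgr->link, &global_bufmgr_list);      /* 2561 */

 unlock:
   simple_mtx_unlock(&global_bufmgr_list_mutex);
   return bufmgr;
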
2570 iris_bufmgr_get_fd(struct iris_bufmgr *bufmgr)
2572 return bufmgr->fd;
2576 iris_bufmgr_get_aux_map_context(struct iris_bufmgr *bufmgr)
2578 return bufmgr->aux_map_ctx;
2582 iris_bufmgr_get_bo_deps_lock(struct iris_bufmgr *bufmgr)
2584 return &bufmgr->bo_deps_lock;
2588 iris_bufmgr_get_border_color_pool(struct iris_bufmgr *bufmgr)
2590 return &bufmgr->border_color_pool;
2594 iris_bufmgr_vram_size(struct iris_bufmgr *bufmgr)
2596 return bufmgr->vram.size;
2600 iris_bufmgr_sram_size(struct iris_bufmgr *bufmgr)
2602 return bufmgr->sys.size;