Lines Matching refs:screen

85 get_slabs(struct zink_screen *screen, uint64_t size, enum zink_alloc_flag flags)
87 //struct pb_slabs *bo_slabs = ((flags & RADEON_FLAG_ENCRYPTED) && screen->info.has_tmz_support) ?
88 //screen->bo_slabs_encrypted : screen->bo_slabs;
90 struct pb_slabs *bo_slabs = screen->pb.bo_slabs;
105 get_slab_pot_entry_size(struct zink_screen *screen, unsigned size)
108 unsigned min_entry_size = 1 << screen->pb.bo_slabs[0].min_order;
114 static unsigned get_slab_entry_alignment(struct zink_screen *screen, unsigned size)
116 unsigned entry_size = get_slab_pot_entry_size(screen, size);
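
The three helpers above size slab entries in power-of-two buckets, starting at bo_slabs[0].min_order. A minimal, self-contained sketch of that rounding (illustrative only; pot_entry_size and next_power_of_two are made-up names, not the driver's code):

   static unsigned
   next_power_of_two(unsigned v)
   {
      v--;
      v |= v >> 1;
      v |= v >> 2;
      v |= v >> 4;
      v |= v >> 8;
      v |= v >> 16;
      return v + 1;
   }

   static unsigned
   pot_entry_size(unsigned size, unsigned min_order)
   {
      unsigned min_entry_size = 1u << min_order;        /* e.g. bo_slabs[0].min_order */
      unsigned pot = next_power_of_two(size ? size : 1);
      return pot > min_entry_size ? pot : min_entry_size;
   }

get_slab_entry_alignment() then derives the entry alignment from the same power-of-two size, as line 116 shows.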
125 bo_destroy(struct zink_screen *screen, struct pb_buffer *pbuf)
146 zink_bo_unmap(screen, bo);
149 VKSCR(FreeMemory)(screen->dev, bo->mem, NULL);
156 bo_can_reclaim(struct zink_screen *screen, struct pb_buffer *pbuf)
160 return zink_screen_usage_check_completion(screen, bo->reads) && zink_screen_usage_check_completion(screen, bo->writes);
172 bo_slab_free(struct zink_screen *screen, struct pb_slab *pslab)
179 zink_bo_unref(screen, slab->buffer);
184 bo_slab_destroy(struct zink_screen *screen, struct pb_buffer *pbuf)
191 //pb_slab_free(get_slabs(screen, bo->base.size, RADEON_FLAG_ENCRYPTED), &bo->u.slab.entry);
193 pb_slab_free(get_slabs(screen, bo->base.size, 0), &bo->u.slab.entry);
197 clean_up_buffer_managers(struct zink_screen *screen)
200 pb_slabs_reclaim(&screen->pb.bo_slabs[i]);
201 //if (screen->info.has_tmz_support)
202 //pb_slabs_reclaim(&screen->bo_slabs_encrypted[i]);
205 pb_cache_release_all_buffers(&screen->pb.bo_cache);
209 get_optimal_alignment(struct zink_screen *screen, uint64_t size, unsigned alignment)
225 bo_destroy_or_cache(struct zink_screen *screen, struct pb_buffer *pbuf)
236 bo_destroy(screen, pbuf);
246 bo_create_internal(struct zink_screen *screen,
260 alignment = get_optimal_alignment(screen, size, alignment);
266 mai.memoryTypeIndex = screen->heap_map[heap];
267 if (screen->info.mem_props.memoryTypes[mai.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
268 alignment = MAX2(alignment, screen->info.props.limits.minMemoryMapAlignment);
269 mai.allocationSize = align64(mai.allocationSize, screen->info.props.limits.minMemoryMapAlignment);
271 unsigned heap_idx = screen->info.mem_props.memoryTypes[screen->heap_map[heap]].heapIndex;
272 if (mai.allocationSize > screen->info.mem_props.memoryHeaps[heap_idx].size) {
273 mesa_loge("zink: can't allocate %"PRIu64" bytes from heap that's only %"PRIu64" bytes!\n", mai.allocationSize, screen->info.mem_props.memoryHeaps[heap_idx].size);
286 VkResult ret = VKSCR(AllocateMemory)(screen->dev, &mai, NULL, &bo->mem);
287 if (!zink_screen_handle_vkresult(screen, ret)) {
294 pb_cache_init_entry(&screen->pb.bo_cache, bo->cache_entry, &bo->base, heap);
308 bo->base.placement = screen->heap_flags[heap];
310 bo->unique_id = p_atomic_inc_return(&screen->pb.next_bo_unique_id);
315 bo_destroy(screen, (void*)bo);
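
bo_create_internal() above fills a VkMemoryAllocateInfo from the screen's heap map and rejects requests larger than the backing heap. A hedged, standalone sketch of that allocation step with plain Vulkan calls (device, mem_props and memory_type_index stand in for screen->dev, screen->info.mem_props and screen->heap_map[heap]; the host-visible alignment bump from lines 268-269 is omitted):

   #include <vulkan/vulkan.h>
   #include <stdio.h>
   #include <inttypes.h>

   static VkResult
   allocate_bo_memory(VkDevice device,
                      const VkPhysicalDeviceMemoryProperties *mem_props,
                      uint32_t memory_type_index,
                      VkDeviceSize size,
                      VkDeviceMemory *out_mem)
   {
      VkMemoryAllocateInfo mai = {
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .allocationSize = size,
         .memoryTypeIndex = memory_type_index,
      };

      /* Refuse allocations larger than the backing heap, as line 272 does. */
      uint32_t heap_idx = mem_props->memoryTypes[memory_type_index].heapIndex;
      if (mai.allocationSize > mem_props->memoryHeaps[heap_idx].size) {
         fprintf(stderr, "can't allocate %" PRIu64 " bytes from a %" PRIu64 "-byte heap\n",
                 (uint64_t)mai.allocationSize,
                 (uint64_t)mem_props->memoryHeaps[heap_idx].size);
         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
      }

      return vkAllocateMemory(device, &mai, NULL, out_mem);
   }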
325 sparse_backing_alloc(struct zink_screen *screen, struct zink_bo *bo,
374 buf = zink_bo_create(screen, size, ZINK_SPARSE_BUFFER_PAGE_SIZE,
411 sparse_free_backing_buffer(struct zink_screen *screen, struct zink_bo *bo,
417 zink_bo_unref(screen, backing->bo);
427 sparse_backing_free(struct zink_screen *screen, struct zink_bo *bo,
482 sparse_free_backing_buffer(screen, bo, backing);
488 bo_sparse_destroy(struct zink_screen *screen, struct pb_buffer *pbuf)
495 sparse_free_backing_buffer(screen, bo,
512 bo_sparse_create(struct zink_screen *screen, uint64_t size)
533 bo->unique_id = p_atomic_inc_return(&screen->pb.next_bo_unique_id);
553 zink_bo_create(struct zink_screen *screen, uint64_t size, unsigned alignment, enum zink_heap heap, enum zink_alloc_flag flags, const void *pNext)
559 //struct pb_slabs *slabs = ((flags & RADEON_FLAG_ENCRYPTED) && screen->info.has_tmz_support) ?
560 //screen->bo_slabs_encrypted : screen->bo_slabs;
561 struct pb_slabs *slabs = screen->pb.bo_slabs;
582 if (alignment > get_slab_entry_alignment(screen, alloc_size)) {
586 unsigned pot_size = get_slab_pot_entry_size(screen, alloc_size);
596 struct pb_slabs *slabs = get_slabs(screen, alloc_size, flags);
598 if (heap == ZINK_HEAP_DEVICE_LOCAL_VISIBLE && !screen->resizable_bar) {
600 if (screen->info.driver_props.driverID == VK_DRIVER_ID_NVIDIA_PROPRIETARY)
602 unsigned heapidx = screen->info.mem_props.memoryTypes[screen->heap_map[heap]].heapIndex;
603 reclaim_all = screen->info.mem_props.memoryHeaps[heapidx].size <= low_bound;
608 clean_up_buffer_managers(screen);
627 return bo_sparse_create(screen, size);
635 size = align64(size, screen->info.props.limits.minMemoryMapAlignment);
636 alignment = align(alignment, screen->info.props.limits.minMemoryMapAlignment);
644 pb_cache_reclaim_buffer(&screen->pb.bo_cache, size, alignment, 0, heap);
650 bo = bo_create_internal(screen, size, alignment, heap, flags, pNext);
653 clean_up_buffer_managers(screen);
655 bo = bo_create_internal(screen, size, alignment, heap, flags, pNext);
664 zink_bo_map(struct zink_screen *screen, struct zink_bo *bo)
684 VkResult result = VKSCR(MapMemory)(screen->dev, real->mem, 0, real->base.size, 0, &cpu);
700 zink_bo_unmap(struct zink_screen *screen, struct zink_bo *bo)
708 VKSCR(UnmapMemory)(screen->dev, real->mem);
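
For a non-slab, non-sparse allocation the map/unmap pair above reduces to vkMapMemory()/vkUnmapMemory() over the whole allocation. A minimal sketch, assuming device, mem and size correspond to screen->dev, real->mem and real->base.size:

   #include <vulkan/vulkan.h>

   static void *
   map_whole_allocation(VkDevice device, VkDeviceMemory mem, VkDeviceSize size)
   {
      void *cpu = NULL;
      if (vkMapMemory(device, mem, 0, size, 0, &cpu) != VK_SUCCESS)
         return NULL;
      return cpu;
   }

   static void
   unmap_allocation(VkDevice device, VkDeviceMemory mem)
   {
      vkUnmapMemory(device, mem);
   }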
713 get_semaphore(struct zink_screen *screen)
721 VkResult ret = VKSCR(CreateSemaphore)(screen->dev, &sci, NULL, &sem);
726 buffer_commit_single(struct zink_screen *screen, struct zink_resource *res, struct zink_bo *bo, uint32_t bo_offset, uint32_t offset, uint32_t size, bool commit, VkSemaphore wait)
728 VkSemaphore sem = get_semaphore(screen);
753 VkResult ret = VKSCR(QueueBindSparse)(screen->queue_sparse, 1, &sparse, VK_NULL_HANDLE);
754 if (zink_screen_handle_vkresult(screen, ret))
756 VKSCR(DestroySemaphore)(screen->dev, sem, NULL);
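
get_semaphore() and buffer_commit_single() above create a semaphore, submit a single page-range bind on the sparse queue, and destroy the semaphore if the submit fails. A standalone sketch of that pattern with plain Vulkan calls (bind_sparse_range is a made-up name; passing mem == VK_NULL_HANDLE unbinds the range, which is what the commit == false path at line 836 relies on):

   #include <vulkan/vulkan.h>

   static VkSemaphore
   bind_sparse_range(VkDevice device, VkQueue sparse_queue, VkBuffer buffer,
                     VkDeviceMemory mem,           /* VK_NULL_HANDLE to unbind */
                     VkDeviceSize mem_offset,
                     VkDeviceSize resource_offset, VkDeviceSize size,
                     VkSemaphore wait)             /* may be VK_NULL_HANDLE */
   {
      VkSemaphoreCreateInfo sci = { .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO };
      VkSemaphore sem;
      if (vkCreateSemaphore(device, &sci, NULL, &sem) != VK_SUCCESS)
         return VK_NULL_HANDLE;

      VkSparseMemoryBind mem_bind = {
         .resourceOffset = resource_offset,
         .size = size,
         .memory = mem,
         .memoryOffset = mem_offset,
      };
      VkSparseBufferMemoryBindInfo buffer_bind = {
         .buffer = buffer,
         .bindCount = 1,
         .pBinds = &mem_bind,
      };
      VkBindSparseInfo sparse = {
         .sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,
         .waitSemaphoreCount = wait != VK_NULL_HANDLE ? 1 : 0,
         .pWaitSemaphores = wait != VK_NULL_HANDLE ? &wait : NULL,
         .bufferBindCount = 1,
         .pBufferBinds = &buffer_bind,
         .signalSemaphoreCount = 1,
         .pSignalSemaphores = &sem,
      };

      /* The returned semaphore lets the next bind wait on this one. */
      if (vkQueueBindSparse(sparse_queue, 1, &sparse, VK_NULL_HANDLE) != VK_SUCCESS) {
         vkDestroySemaphore(device, sem, NULL);
         return VK_NULL_HANDLE;
      }
      return sem;
   }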
761 buffer_bo_commit(struct zink_screen *screen, struct zink_resource *res, uint32_t offset, uint32_t size, bool commit, VkSemaphore *sem)
796 backing = sparse_backing_alloc(screen, bo, &backing_start, &backing_size);
801 cur_sem = buffer_commit_single(screen, res, backing->bo, backing_start,
805 ok = sparse_backing_free(screen, bo, backing, backing_start, backing_size);
836 cur_sem = buffer_commit_single(screen, res, NULL, 0,
862 if (!sparse_backing_free(screen, bo, backing, backing_start, span_pages)) {
875 texture_commit_single(struct zink_screen *screen, struct zink_resource *res, VkSparseImageMemoryBind *ibind, unsigned num_binds, bool commit, VkSemaphore wait)
877 VkSemaphore sem = get_semaphore(screen);
892 VkResult ret = VKSCR(QueueBindSparse)(screen->queue_sparse, 1, &sparse, VK_NULL_HANDLE);
893 if (zink_screen_handle_vkresult(screen, ret))
895 VKSCR(DestroySemaphore)(screen->dev, sem, NULL);
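
texture_commit_single() above follows the same submit pattern for an array of per-page image binds; structurally the only difference is that the binds go through pImageBinds. A sketch under the same assumptions (bind_image_pages is a made-up name; each VkSparseImageMemoryBind carries the subresource, offset, extent and backing memory for one page region):

   #include <vulkan/vulkan.h>

   static VkResult
   bind_image_pages(VkQueue sparse_queue, VkImage image,
                    const VkSparseImageMemoryBind *binds, uint32_t num_binds,
                    VkSemaphore wait, VkSemaphore signal)
   {
      VkSparseImageMemoryBindInfo image_bind = {
         .image = image,
         .bindCount = num_binds,
         .pBinds = binds,
      };
      VkBindSparseInfo sparse = {
         .sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,
         .waitSemaphoreCount = wait != VK_NULL_HANDLE ? 1 : 0,
         .pWaitSemaphores = wait != VK_NULL_HANDLE ? &wait : NULL,
         .imageBindCount = 1,
         .pImageBinds = &image_bind,
         .signalSemaphoreCount = 1,
         .pSignalSemaphores = &signal,
      };
      return vkQueueBindSparse(sparse_queue, 1, &sparse, VK_NULL_HANDLE);
   }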
900 texture_commit_miptail(struct zink_screen *screen, struct zink_resource *res, struct zink_bo *bo, uint32_t bo_offset, uint32_t offset, bool commit, VkSemaphore wait)
902 VkSemaphore sem = get_semaphore(screen);
924 VkResult ret = VKSCR(QueueBindSparse)(screen->queue_sparse, 1, &sparse, VK_NULL_HANDLE);
925 if (zink_screen_handle_vkresult(screen, ret))
927 VKSCR(DestroySemaphore)(screen->dev, sem, NULL);
932 zink_bo_commit(struct zink_screen *screen, struct zink_resource *res, unsigned level, struct pipe_box *box, bool commit, VkSemaphore *sem)
938 if (screen->faked_e5sparse && res->base.b.format == PIPE_FORMAT_R9G9B9E5_FLOAT)
941 simple_mtx_lock(&screen->queue_lock);
944 ok = buffer_bo_commit(screen, res, box->x, box->width, commit, sem);
1020 backing[i] = sparse_backing_alloc(screen, bo, &backing_start[i], &backing_size[i]);
1027 cur_sem = texture_commit_miptail(screen, res, backing[i]->bo, backing_start[i], offset, commit, cur_sem);
1075 cur_sem = texture_commit_miptail(screen, res, NULL, 0, offset, commit, cur_sem);
1085 cur_sem = texture_commit_single(screen, res, ibind, ARRAY_SIZE(ibind), commit, cur_sem);
1088 ok = sparse_backing_free(screen, backing[s]->bo, backing[s], backing_start[s], backing_size[s]);
1104 cur_sem = texture_commit_single(screen, res, ibind, i, commit, cur_sem);
1107 ok = sparse_backing_free(screen, backing[s]->bo, backing[s], backing_start[s], backing_size[s]);
1119 simple_mtx_unlock(&screen->queue_lock);
1125 zink_bo_get_kms_handle(struct zink_screen *screen, struct zink_bo *bo, int fd, uint32_t *handle)
1142 bool success = drmPrimeFDToHandle(screen->drm_fd, fd, handle) == 0;
1146 export->drm_fd = screen->drm_fd;
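
zink_bo_get_kms_handle() above turns a dma-buf (PRIME) fd into a GEM handle on the screen's DRM fd. The libdrm call it relies on, shown in isolation (prime_fd_to_kms_handle is a made-up wrapper name):

   #include <stdint.h>
   #include <stdbool.h>
   #include <xf86drm.h>

   static bool
   prime_fd_to_kms_handle(int drm_fd, int prime_fd, uint32_t *handle)
   {
      /* drmPrimeFDToHandle() returns 0 on success. */
      return drmPrimeFDToHandle(drm_fd, prime_fd, handle) == 0;
   }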
1167 struct zink_screen *screen = priv;
1175 //struct pb_slabs *slabs = ((flags & RADEON_FLAG_ENCRYPTED) && screen->info.has_tmz_support) ?
1176 //screen->bo_slabs_encrypted : screen->bo_slabs;
1177 struct pb_slabs *slabs = screen->pb.bo_slabs;
1207 slab->buffer = zink_bo(zink_bo_create(screen, slab_size, slab_size, heap, 0, NULL));
1222 base_id = p_atomic_fetch_add(&screen->pb.next_bo_unique_id, slab->base.num_entries);
1227 bo->base.alignment_log2 = util_logbase2(get_slab_entry_alignment(screen, entry_size));
1255 zink_bo_unref(screen, slab->buffer);
1268 zink_bo_init(struct zink_screen *screen)
1271 for (uint32_t i = 0; i < screen->info.mem_props.memoryHeapCount; ++i)
1272 total_mem += screen->info.mem_props.memoryHeaps[i].size;
1274 pb_cache_init(&screen->pb.bo_cache, ZINK_HEAP_MAX,
1276 total_mem / 8, screen,
1290 if (!pb_slabs_init(&screen->pb.bo_slabs[i],
1293 screen,
1301 screen->pb.min_alloc_size = 1 << screen->pb.bo_slabs[0].min_order;
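
zink_bo_init() above sums every Vulkan memory heap to size the buffer cache (the total_mem / 8 passed to pb_cache_init at line 1276). A standalone sketch of that computation, assuming pdev is the screen's VkPhysicalDevice:

   #include <vulkan/vulkan.h>
   #include <stdint.h>

   static uint64_t
   bo_cache_budget(VkPhysicalDevice pdev)
   {
      VkPhysicalDeviceMemoryProperties mem_props;
      vkGetPhysicalDeviceMemoryProperties(pdev, &mem_props);

      uint64_t total_mem = 0;
      for (uint32_t i = 0; i < mem_props.memoryHeapCount; i++)
         total_mem += mem_props.memoryHeaps[i].size;

      return total_mem / 8;   /* cache budget: one eighth of all heap memory */
   }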
1306 zink_bo_deinit(struct zink_screen *screen)
1309 if (screen->pb.bo_slabs[i].groups)
1310 pb_slabs_deinit(&screen->pb.bo_slabs[i]);
1312 pb_cache_deinit(&screen->pb.bo_cache);