Lines Matching refs:bo
33 void bo_del(struct fd_bo *bo);
37 set_name(struct fd_bo *bo, uint32_t name)
39 bo->name = name;
41 _mesa_hash_table_insert(bo->dev->name_table, &bo->name, bo);
48 struct fd_bo *bo = NULL;
52 bo = fd_bo_ref(entry->data);
54 /* don't break the bucket if this bo was found in one */
55 list_delinit(&bo->list);
57 return bo;
61 fd_bo_init_common(struct fd_bo *bo, struct fd_device *dev)
64 assert(bo->size);
65 assert(bo->handle);
67 bo->dev = dev;
68 bo->iova = bo->funcs->iova(bo);
69 bo->reloc_flags = FD_RELOC_FLAGS_INIT;
71 p_atomic_set(&bo->refcnt, 1);
72 list_inithead(&bo->list);
79 struct fd_bo *bo;
83 bo = dev->funcs->bo_from_handle(dev, size, handle);
84 if (!bo) {
93 _mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);
95 return bo;
102 struct fd_bo *bo = NULL;
108 bo = fd_bo_cache_alloc(cache, &size, flags);
109 if (bo)
110 return bo;
112 bo = dev->funcs->bo_new(dev, size, flags);
113 if (!bo)
118 _mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);
121 bo->alloc_flags = flags;
122 bo->max_fences = 1;
123 bo->fences = &bo->_inline_fence;
125 VG_BO_ALLOC(bo);
127 return bo;
133 struct fd_bo *bo = bo_new(dev, size, flags, &dev->bo_cache);
134 if (bo)
135 bo->bo_reuse = BO_CACHE;
136 return bo;
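Taken together, fd_bo_new() and the internal bo_new() above show the allocation path: try the bucket cache, fall back to the backend's bo_new, then register the handle and set up the inline fence slot. A minimal caller-side sketch, assuming an existing struct fd_device and that fd_bo_new() takes a trailing debug name as in current freedreno_drmif.h (signatures differ between versions):

    /* Hedged sketch, not from the listing above. */
    static void example_alloc(struct fd_device *dev)
    {
       struct fd_bo *scratch = fd_bo_new(dev, 0x1000, 0, "scratch");
       if (!scratch)
          return;
       /* ... fill and use the buffer ... */
       fd_bo_del(scratch);   /* last ref: may be recycled into dev->bo_cache */
    }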
140 _fd_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
142 bo->funcs->set_name(bo, fmt, ap);
145 /* internal function to allocate bo's that use the ringbuffer cache
147 * bo's get vmap'd on the kernel side, and that is expensive, we want
148 * to re-use cmdstream bo's for cmdstream and not unrelated purposes.
154 struct fd_bo *bo = bo_new(dev, size, flags, &dev->ring_cache);
155 if (bo) {
156 bo->bo_reuse = RING_CACHE;
157 bo->reloc_flags |= FD_RELOC_DUMP;
158 fd_bo_set_name(bo, "cmdstream");
160 return bo;
166 struct fd_bo *bo = NULL;
170 bo = lookup_bo(dev->handle_table, handle);
171 if (bo)
174 bo = bo_from_handle(dev, size, handle);
176 VG_BO_ALLOC(bo);
181 return bo;
189 struct fd_bo *bo;
198 bo = lookup_bo(dev->handle_table, handle);
199 if (bo)
202 /* lseek() to get bo size */
206 bo = bo_from_handle(dev, size, handle);
208 VG_BO_ALLOC(bo);
213 return bo;
222 struct fd_bo *bo;
226 /* check name table first, to see if bo is already open: */
227 bo = lookup_bo(dev->name_table, name);
228 if (bo)
236 bo = lookup_bo(dev->handle_table, req.handle);
237 if (bo)
240 bo = bo_from_handle(dev, req.size, req.handle);
241 if (bo) {
242 set_name(bo, name);
243 VG_BO_ALLOC(bo);
249 return bo;
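The three import paths above (fd_bo_from_handle, fd_bo_from_dmabuf, fd_bo_from_name) all dedupe through lookup_bo() on dev->handle_table / dev->name_table, so importing the same underlying buffer twice returns the same refcounted fd_bo. A hedged sketch of the dma-buf case, where prime_fd is assumed to come from another process or API:

    /* Sketch only: import a buffer that was shared as a dma-buf fd. */
    static struct fd_bo *example_import(struct fd_device *dev, int prime_fd)
    {
       struct fd_bo *bo = fd_bo_from_dmabuf(dev, prime_fd);
       if (bo)
          assert(fd_bo_get_iova(bo) != 0);   /* backend assigned a GPU address */
       return bo;
    }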
253 fd_bo_mark_for_dump(struct fd_bo *bo)
255 bo->reloc_flags |= FD_RELOC_DUMP;
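fd_bo_mark_for_dump() ORs in the same FD_RELOC_DUMP flag that fd_bo_new_ring() applies to cmdstream buffers (lines 154-158), so the buffer contents are captured in GPU crash dumps. A hedged sketch of opting an ordinary allocation in (fd_bo_new() signature assumed as above):

    /* Sketch: make a non-cmdstream bo show up in crash dumps too. */
    struct fd_bo *state = fd_bo_new(dev, 0x2000, 0, "state");
    if (state)
       fd_bo_mark_for_dump(state);   /* sets FD_RELOC_DUMP, line 255 */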
259 fd_bo_get_iova(struct fd_bo *bo)
262 assert(bo->iova != 0);
263 return bo->iova;
267 fd_bo_ref(struct fd_bo *bo)
269 p_atomic_inc(&bo->refcnt);
270 return bo;
274 bo_del_or_recycle(struct fd_bo *bo)
276 struct fd_device *dev = bo->dev;
280 if ((bo->bo_reuse == BO_CACHE) &&
281 (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
284 if ((bo->bo_reuse == RING_CACHE) &&
285 (fd_bo_cache_free(&dev->ring_cache, bo) == 0))
288 bo_del(bo);
292 fd_bo_del_locked(struct fd_bo *bo)
296 if (!p_atomic_dec_zero(&bo->refcnt))
299 bo_del_or_recycle(bo);
303 fd_bo_del(struct fd_bo *bo)
305 if (!p_atomic_dec_zero(&bo->refcnt))
309 bo_del_or_recycle(bo);
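fd_bo_ref()/fd_bo_del() are the refcounting pair; only the call that drops refcnt to zero reaches bo_del_or_recycle(), which either returns the buffer to one of the two caches or destroys it via bo_del(). A short sketch (assuming bo starts with a single reference):

    struct fd_bo *extra = fd_bo_ref(bo);   /* refcnt 1 -> 2 */
    fd_bo_del(extra);                      /* refcnt 2 -> 1, bo stays alive */
    fd_bo_del(bo);                         /* refcnt 1 -> 0: recycle or bo_del() */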
317 * Normally we expect at most a single fence, the exception being bo's
321 cleanup_fences(struct fd_bo *bo, bool expired)
325 for (int i = 0; i < bo->nr_fences; i++) {
326 struct fd_bo_fence *f = &bo->fences[i];
333 bo->nr_fences--;
335 if (bo->nr_fences > 0) {
337 bo->fences[i] = bo->fences[bo->nr_fences];
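The fence cleanup at lines 333-337 is an unordered remove: rather than shifting the tail down, the last fence is copied into the vacated slot. A generic sketch of the idiom (the helper name is made up for illustration):

    /* Hypothetical helper showing the same swap-remove idiom. */
    static void swap_remove(struct fd_bo_fence *fences, int32_t *nr, int i)
    {
       (*nr)--;
       if (*nr > 0)
          fences[i] = fences[*nr];   /* O(1) removal, order not preserved */
    }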
347 bo_del(struct fd_bo *bo)
349 struct fd_device *dev = bo->dev;
350 uint32_t handle = bo->handle;
352 VG_BO_FREE(bo);
356 cleanup_fences(bo, false);
357 if (bo->fences != &bo->_inline_fence)
358 free(bo->fences);
360 if (bo->map)
361 os_munmap(bo->map, bo->size);
365 if (bo->name)
366 _mesa_hash_table_remove_key(dev->name_table, &bo->name);
369 bo->funcs->destroy(bo);
380 bo_flush(struct fd_bo *bo)
382 for (int i = 0; i < bo->nr_fences; i++) {
383 struct fd_bo_fence *f = &bo->fences[i];
389 fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
391 if (!bo->name) {
393 .handle = bo->handle,
397 ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
403 set_name(bo, req.name);
405 bo->bo_reuse = NO_CACHE;
406 bo->shared = true;
407 bo_flush(bo);
410 *name = bo->name;
416 fd_bo_handle(struct fd_bo *bo)
418 bo->bo_reuse = NO_CACHE;
419 bo->shared = true;
420 bo_flush(bo);
421 return bo->handle;
425 fd_bo_dmabuf(struct fd_bo *bo)
429 ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC | DRM_RDWR,
436 bo->bo_reuse = NO_CACHE;
437 bo->shared = true;
438 bo_flush(bo);
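All three export paths above (flink name, raw GEM handle, dma-buf fd) mark the bo shared, switch it to NO_CACHE so it can never be recycled through a cache, and flush any deferred submits that reference it. A hedged sketch of handing a buffer to another process:

    /* Sketch: export as a dma-buf fd for another process or API to import. */
    int prime_fd = fd_bo_dmabuf(bo);
    if (prime_fd < 0) {
       /* export failed; the bo is still usable locally */
    } else {
       /* pass prime_fd over a unix socket, close() our copy when done */
    }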
444 fd_bo_size(struct fd_bo *bo)
446 return bo->size;
450 fd_bo_is_cached(struct fd_bo *bo)
452 return !!(bo->alloc_flags & FD_BO_CACHED_COHERENT);
456 bo_map(struct fd_bo *bo)
458 if (!bo->map) {
462 ret = bo->funcs->offset(bo, &offset);
467 bo->map = os_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
468 bo->dev->fd, offset);
469 if (bo->map == MAP_FAILED) {
471 bo->map = NULL;
474 return bo->map;
478 fd_bo_map(struct fd_bo *bo)
483 if (bo->alloc_flags & FD_BO_NOMAP)
486 return bo_map(bo);
490 fd_bo_upload(struct fd_bo *bo, void *src, unsigned len)
492 if (bo->funcs->upload) {
493 bo->funcs->upload(bo, src, len);
497 memcpy(bo_map(bo), src, len);
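fd_bo_map() returns NULL for buffers allocated with FD_BO_NOMAP, while fd_bo_upload() works either way: it uses the backend upload hook when present and otherwise falls back to memcpy into the CPU mapping. A hedged write-path sketch:

    /* Sketch: prefer fd_bo_upload() for one-shot writes; map only if needed. */
    uint32_t data[4] = {0, 1, 2, 3};
    fd_bo_upload(bo, data, sizeof(data));

    void *ptr = fd_bo_map(bo);           /* NULL if allocated with FD_BO_NOMAP */
    if (ptr)
       memcpy(ptr, data, sizeof(data));  /* direct CPU write through the map */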
502 fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
505 enum fd_bo_state state = fd_bo_state(bo);
513 bo_flush(bo);
524 /* In case the bo is referenced by a deferred submit, flush up to the
527 bo_flush(bo);
532 return bo->funcs->cpu_prep(bo, pipe, op & ~FD_BO_PREP_FLUSH);
536 fd_bo_cpu_fini(struct fd_bo *bo)
541 // bo->funcs->cpu_fini(bo);
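fd_bo_cpu_prep() flushes any deferred submits that reference the bo and then waits for pending GPU access (the FD_BO_PREP_FLUSH bit is masked off before the backend cpu_prep at line 532); fd_bo_cpu_fini() is currently a no-op apart from the commented-out backend hook. A hedged readback sketch; FD_BO_PREP_READ, dst and len are assumed from the caller's context:

    /* Sketch: make sure the GPU is finished before reading back on the CPU. */
    fd_bo_cpu_prep(bo, pipe, FD_BO_PREP_READ);
    memcpy(dst, fd_bo_map(bo), len);   /* assumes the bo is mappable */
    fd_bo_cpu_fini(bo);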
545 fd_bo_add_fence(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t fence)
549 if (bo->nosync)
552 /* The common case is bo re-used on the same pipe it had previously
555 for (int i = 0; i < bo->nr_fences; i++) {
556 struct fd_bo_fence *f = &bo->fences[i];
564 cleanup_fences(bo, true);
570 if (unlikely((bo->nr_fences == 1) &&
571 (bo->fences == &bo->_inline_fence))) {
572 bo->nr_fences = bo->max_fences = 0;
573 bo->fences = NULL;
574 APPEND(bo, fences, bo->_inline_fence);
577 APPEND(bo, fences, (struct fd_bo_fence){
584 fd_bo_state(struct fd_bo *bo)
588 cleanup_fences(bo, true);
590 if (bo->shared || bo->nosync)
593 if (!bo->nr_fences)