Lines Matching refs:bo — every line that references the symbol `bo`, shown with its source line number (non-matching lines are omitted, so blocks may have gaps)
33 drm_private void bo_del(struct fd_bo *bo);
36 static void set_name(struct fd_bo *bo, uint32_t name)
38 bo->name = name;
40 drmHashInsert(bo->dev->name_table, name, bo);
46 struct fd_bo *bo = NULL;
47 if (!drmHashLookup(tbl, key, (void **)&bo)) {
49 bo = fd_bo_ref(bo);
51 /* don't break the bucket if this bo was found in one */
52 list_delinit(&bo->list);
54 return bo;
61 struct fd_bo *bo;
63 bo = dev->funcs->bo_from_handle(dev, size, handle);
64 if (!bo) {
68 bo->dev = fd_device_ref(dev);
69 bo->size = size;
70 bo->handle = handle;
71 atomic_set(&bo->refcnt, 1);
72 list_inithead(&bo->list);
74 drmHashInsert(dev->handle_table, handle, bo);
75 return bo;
82 struct fd_bo *bo = NULL;
86 bo = fd_bo_cache_alloc(cache, &size, flags);
87 if (bo)
88 return bo;
95 bo = bo_from_handle(dev, size, handle);
98 VG_BO_ALLOC(bo);
100 return bo;
106 struct fd_bo *bo = bo_new(dev, size, flags, &dev->bo_cache);
107 if (bo)
108 bo->bo_reuse = BO_CACHE;
109 return bo;
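
fd_bo_new() is the public allocation entry point; it routes through bo_new() with the device's bucket cache so freed buffers of a matching size can be recycled. A minimal usage sketch (assuming an already-open DRM fd for a freedreno device; the flag bits are the standard DRM_FREEDRENO_GEM_* values from freedreno_drmif.h):

	#include <freedreno_drmif.h>

	/* Sketch: allocate a 4KiB write-combined buffer via the bucket cache.
	 * `drm_fd` is assumed to be an open DRM node for an msm device;
	 * fd_device_new() takes ownership of it. */
	static struct fd_bo *alloc_scratch(int drm_fd)
	{
		struct fd_device *dev = fd_device_new(drm_fd);
		if (!dev)
			return NULL;
		return fd_bo_new(dev, 4096,
				DRM_FREEDRENO_GEM_TYPE_KMEM |
				DRM_FREEDRENO_GEM_CACHE_WCOMBINE);
	}
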
112 /* internal function to allocate bo's that use the ringbuffer cache
113  * instead of the normal bo_cache.  The purpose is, because cmdstream
114  * bo's get vmap'd on the kernel side, and that is expensive, we want
115  * to re-use cmdstream bo's for cmdstream and not unrelated purposes.
116  */
120 struct fd_bo *bo = bo_new(dev, size, flags, &dev->ring_cache);
121 if (bo)
122 bo->bo_reuse = RING_CACHE;
123 return bo;
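
The comment above carries the rationale: kernel-side vmap of cmdstream buffers is expensive, so ringbuffer allocations go through a separate cache that only ever recycles them as cmdstream again. fd_bo_new_ring() is drm_private, so only the library's own ringbuffer code can call it; a hypothetical internal call site would look like:

	/* Sketch (library-internal, hypothetical call site): back a new
	 * ringbuffer with a bo from dev->ring_cache instead of bo_cache. */
	struct fd_bo *ring_bo = fd_bo_new_ring(pipe->dev, ring_size, 0);
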
129 struct fd_bo *bo = NULL;
133 bo = lookup_bo(dev->handle_table, handle);
134 if (bo)
137 bo = bo_from_handle(dev, size, handle);
139 VG_BO_ALLOC(bo);
144 return bo;
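
Because of the handle_table lookup, wrapping the same GEM handle twice yields the same fd_bo with its refcount bumped, not a duplicate wrapper. A sketch of that invariant (the handle and size are assumed to come from some prior GEM allocation, e.g. a KMS dumb-buffer ioctl):

	#include <assert.h>
	#include <freedreno_drmif.h>

	/* Sketch: the handle table de-duplicates wrappers. */
	static void wrap_twice(struct fd_device *dev, uint32_t handle, uint32_t size)
	{
		struct fd_bo *a = fd_bo_from_handle(dev, handle, size);
		struct fd_bo *b = fd_bo_from_handle(dev, handle, size);
		assert(a == b);   /* second call just took another reference */
		fd_bo_del(b);
		fd_bo_del(a);
	}
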
152 struct fd_bo *bo;
161 bo = lookup_bo(dev->handle_table, handle);
162 if (bo)
165 /* lseek() to get bo size */
169 bo = bo_from_handle(dev, size, handle);
171 VG_BO_ALLOC(bo);
176 return bo;
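
As the comment notes, the import path discovers the buffer size by lseek()ing to the end of the dma-buf fd, so callers pass no size. A sketch (the dma-buf fd is assumed to have arrived from another process, e.g. over a unix socket; the import does not consume the fd):

	#include <unistd.h>
	#include <freedreno_drmif.h>

	/* Sketch: import a dma-buf fd, then close our copy of it; the
	 * fd_bo keeps the underlying buffer alive. */
	static struct fd_bo *import_dmabuf(struct fd_device *dev, int dmabuf_fd)
	{
		struct fd_bo *bo = fd_bo_from_dmabuf(dev, dmabuf_fd);
		close(dmabuf_fd);
		return bo;
	}
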
184 struct fd_bo *bo;
188 /* check name table first, to see if bo is already open: */
189 bo = lookup_bo(dev->name_table, name);
190 if (bo)
198 bo = lookup_bo(dev->handle_table, req.handle);
199 if (bo)
202 bo = bo_from_handle(dev, req.size, req.handle);
203 if (bo) {
204 set_name(bo, name);
205 VG_BO_ALLOC(bo);
211 return bo;
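
The import order matters: the name table is consulted first, then (after the GEM open ioctl fills req) the handle table, so a buffer that reaches us by several routes still ends up as a single fd_bo. Import-side sketch (the flink name is assumed to come from the exporting process; see fd_bo_get_name() below for the export side):

	#include <freedreno_drmif.h>

	/* Sketch: open a buffer shared by global flink name. */
	static struct fd_bo *import_flink(struct fd_device *dev, uint32_t name)
	{
		return fd_bo_from_name(dev, name);
	}
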
214 drm_public uint64_t fd_bo_get_iova(struct fd_bo *bo)
216 return bo->funcs->iova(bo);
219 drm_public void fd_bo_put_iova(struct fd_bo *bo)
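
fd_bo_get_iova()/fd_bo_put_iova() bracket use of the buffer's GPU virtual address; the real work is delegated to the backend through bo->funcs. A fragment of the intended pattern (emit_reloc() and ring are hypothetical stand-ins for whatever consumes the address):

	/* Sketch: hold the GPU address only while building the cmdstream. */
	uint64_t iova = fd_bo_get_iova(bo);
	emit_reloc(ring, iova);   /* hypothetical: write address into cmdstream */
	fd_bo_put_iova(bo);
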
224 drm_public struct fd_bo * fd_bo_ref(struct fd_bo *bo)
226 atomic_inc(&bo->refcnt);
227 return bo;
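
Reference counting is a plain atomic increment; every fd_bo_ref() (and every constructor above) must eventually be balanced by an fd_bo_del(). Sketch of a second owner taking its own reference (struct bo_slot is a hypothetical container):

	#include <freedreno_drmif.h>

	struct bo_slot { struct fd_bo *bo; };

	/* Sketch: a table that stashes a bo takes its own reference,
	 * balanced by fd_bo_del() when the slot is cleared. */
	static void slot_set(struct bo_slot *s, struct fd_bo *bo)
	{
		s->bo = fd_bo_ref(bo);
	}
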
230 drm_public void fd_bo_del(struct fd_bo *bo)
232 struct fd_device *dev = bo->dev;
234 if (!atomic_dec_and_test(&bo->refcnt))
239 if ((bo->bo_reuse == BO_CACHE) && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
241 if ((bo->bo_reuse == RING_CACHE) && (fd_bo_cache_free(&dev->ring_cache, bo) == 0))
244 bo_del(bo);
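
When the last reference drops, a cache-eligible bo is handed back to its bucket cache (bo_cache or ring_cache) instead of being destroyed; only if the cache declines (non-zero return) does bo_del() run. So a free-then-realloc of the same size can, in principle, be satisfied without a new kernel allocation (which bo comes back is not guaranteed; this is only a cache hint):

	#include <freedreno_drmif.h>

	/* Sketch: the second fd_bo_new() may be served from dev->bo_cache. */
	static void recycle_demo(struct fd_device *dev, uint32_t flags)
	{
		struct fd_bo *a = fd_bo_new(dev, 0x1000, flags);
		fd_bo_del(a);   /* parked in the bucket cache, not freed */
		struct fd_bo *b = fd_bo_new(dev, 0x1000, flags);
		fd_bo_del(b);
	}
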
251 drm_private void bo_del(struct fd_bo *bo)
253 VG_BO_FREE(bo);
255 if (bo->map)
256 drm_munmap(bo->map, bo->size);
258 /* TODO probably bo's in bucket list get removed from
259  * handle table??
260  */
262 if (bo->handle) {
263 drmHashDelete(bo->dev->handle_table, bo->handle);
264 if (bo->name)
265 drmHashDelete(bo->dev->name_table, bo->name);
266 drmCloseBufferHandle(bo->dev->fd, bo->handle);
269 bo->funcs->destroy(bo);
272 drm_public int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
274 if (!bo->name) {
276 .handle = bo->handle,
280 ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
286 set_name(bo, req.name);
288 bo->bo_reuse = NO_CACHE;
291 *name = bo->name;
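
This is the export side of flink sharing: the name is created lazily with DRM_IOCTL_GEM_FLINK, cached in bo->name, and registered in the name table, and the bo is demoted to NO_CACHE since another process may now hold it beyond our last reference. Sketch of the two halves together (send_to_peer() is a hypothetical IPC mechanism):

	#include <freedreno_drmif.h>

	/* Sketch: process A exports... */
	uint32_t name;
	if (fd_bo_get_name(bo, &name) == 0)
		send_to_peer(name);              /* hypothetical IPC */

	/* ...and process B imports: */
	struct fd_bo *shared = fd_bo_from_name(dev, name);
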
296 drm_public uint32_t fd_bo_handle(struct fd_bo *bo)
298 return bo->handle;
301 drm_public int fd_bo_dmabuf(struct fd_bo *bo)
305 ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
312 bo->bo_reuse = NO_CACHE;
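
The prime export path mirrors flink: drmPrimeHandleToFD() produces a dma-buf fd (opened with DRM_CLOEXEC) that the caller owns, and the bo again becomes NO_CACHE because its lifetime is no longer ours alone. Sketch (send_fd_to_peer() is a hypothetical fd-passing IPC):

	#include <unistd.h>
	#include <freedreno_drmif.h>

	/* Sketch: export as dma-buf, hand the fd off, close our copy;
	 * the importer's reference keeps the buffer alive. */
	int prime_fd = fd_bo_dmabuf(bo);
	if (prime_fd >= 0) {
		send_fd_to_peer(prime_fd);   /* hypothetical IPC */
		close(prime_fd);
	}
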
317 drm_public uint32_t fd_bo_size(struct fd_bo *bo)
319 return bo->size;
322 drm_public void * fd_bo_map(struct fd_bo *bo)
324 if (!bo->map) {
328 ret = bo->funcs->offset(bo, &offset);
333 bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
334 bo->dev->fd, offset);
335 if (bo->map == MAP_FAILED) {
337 bo->map = NULL;
340 return bo->map;
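
The CPU mapping is created lazily, via the backend's offset() hook plus drm_mmap() against the device fd, and then cached in bo->map for the bo's lifetime, so repeated fd_bo_map() calls are cheap. Usage sketch:

	#include <string.h>
	#include <freedreno_drmif.h>

	/* Sketch: fill a buffer through the (lazily created) CPU mapping. */
	static int fill_bo(struct fd_bo *bo, int byte)
	{
		void *map = fd_bo_map(bo);
		if (!map)
			return -1;
		memset(map, byte, fd_bo_size(bo));
		return 0;
	}
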
344 drm_public int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
346 return bo->funcs->cpu_prep(bo, pipe, op);
349 drm_public void fd_bo_cpu_fini(struct fd_bo *bo)
351 bo->funcs->cpu_fini(bo);
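
Both calls dispatch straight to the backend, which implements the actual synchronization against pending GPU work. The intended pattern is to bracket CPU access between them (assuming a pipe from fd_pipe_new(); the DRM_FREEDRENO_PREP_* flags are from freedreno_drmif.h):

	#include <string.h>
	#include <freedreno_drmif.h>

	/* Sketch: read back GPU-written data only after the GPU is done. */
	static int read_back(struct fd_bo *bo, struct fd_pipe *pipe,
			void *dst, uint32_t len)
	{
		int ret = fd_bo_cpu_prep(bo, pipe, DRM_FREEDRENO_PREP_READ);
		if (ret)
			return ret;
		memcpy(dst, fd_bo_map(bo), len);
		fd_bo_cpu_fini(bo);
		return 0;
	}
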