Lines Matching refs:release
54 return "release";
87 struct qxl_release *release;
89 size_t size = sizeof(*release);
91 release = kmalloc(size, GFP_KERNEL);
92 if (!release) {
96 release->base.ops = NULL;
97 release->type = type;
98 release->release_offset = 0;
99 release->surface_release_id = 0;
100 INIT_LIST_HEAD(&release->bos);
104 handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
105 release->base.seqno = ++qdev->release_seqno;
109 kfree(release);
113 *ret = release;
114 DRM_DEBUG_DRIVER("allocated release %d\n", handle);
115 release->id = handle;
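The block above (lines 87-115) is the allocator: the release is kmalloc'ed, its fields are initialized by hand, and idr_alloc() publishes it in qdev->release_idr so the device can later name it by a small integer handle. A minimal sketch of that handle-allocation pattern, assuming a kernel context and hypothetical my_dev/my_obj types:

    #include <linux/idr.h>
    #include <linux/spinlock.h>
    #include <linux/gfp.h>

    /* Hypothetical container types for the sketch. */
    struct my_obj { int id; };
    struct my_dev { struct idr obj_idr; spinlock_t idr_lock; };

    static int my_obj_alloc_handle(struct my_dev *dev, struct my_obj *obj)
    {
            int handle;

            /* start = 1 skips handle 0; end = 0 means no upper bound */
            spin_lock(&dev->idr_lock);
            handle = idr_alloc(&dev->obj_idr, obj, 1, 0, GFP_NOWAIT);
            spin_unlock(&dev->idr_lock);
            if (handle < 0)
                    return handle;          /* -ENOMEM or -ENOSPC */
            obj->id = handle;
            return 0;
    }

GFP_NOWAIT matters here: the allocation happens under a spinlock, so idr_alloc() must not sleep.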
120 qxl_release_free_list(struct qxl_release *release)
122 while (!list_empty(&release->bos)) {
126 entry = container_of(release->bos.next,
133 release->release_bo = NULL;
138 struct qxl_release *release)
140 DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);
142 if (release->surface_release_id)
143 qxl_surface_id_dealloc(qdev, release->surface_release_id);
146 idr_remove(&qdev->release_idr, release->id);
149 if (release->base.ops) {
150 WARN_ON(list_empty(&release->bos));
151 qxl_release_free_list(release);
153 dma_fence_signal(&release->base);
154 dma_fence_put(&release->base);
156 qxl_release_free_list(release);
157 kfree(release);
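Lines 138-157 encode the lifetime rule: once the embedded fence has been initialized (release->base.ops is non-NULL), the release must be torn down through dma_fence_signal() plus dma_fence_put(), letting the fence's release callback do the actual freeing; a bare kfree() is only legal for a release that was never fenced. A sketch of that two-path teardown, assuming a kernel context and a hypothetical my_obj embedding a dma_fence:

    #include <linux/dma-fence.h>
    #include <linux/slab.h>

    struct my_obj { struct dma_fence base; /* ... */ };

    static void my_obj_free(struct my_obj *obj)
    {
            if (obj->base.ops) {
                    /* fenced: wake any waiters, then drop the reference;
                     * the fence ->release() hook frees the object */
                    dma_fence_signal(&obj->base);
                    dma_fence_put(&obj->base);
            } else {
                    /* never fenced: nobody else holds a reference */
                    kfree(obj);
            }
    }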
171 int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
175 list_for_each_entry(entry, &release->bos, tv.head) {
187 list_add_tail(&entry->tv.head, &release->bos);
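Lines 171-187 are a dedup-then-append: the bos list is scanned first, and a tracking entry is only allocated and queued if the buffer is not already on the release. The same shape, with hypothetical my_entry/my_bo types:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct my_bo;
    struct my_entry { struct list_head node; struct my_bo *bo; };

    static int my_list_add_unique(struct list_head *head, struct my_bo *bo)
    {
            struct my_entry *entry;

            list_for_each_entry(entry, head, node)
                    if (entry->bo == bo)
                            return 0;       /* already tracked */

            entry = kmalloc(sizeof(*entry), GFP_KERNEL);
            if (!entry)
                    return -ENOMEM;
            entry->bo = bo;                 /* caller keeps its reference */
            list_add_tail(&entry->node, head);
            return 0;
    }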
214 int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
219 /* if only one object on the release it's the release itself
221 if (list_is_singular(&release->bos))
224 ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
229 list_for_each_entry(entry, &release->bos, tv.head) {
234 ttm_eu_backoff_reservation(&release->ticket, &release->bos);
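Lines 214-234 take every buffer lock through a single ww_acquire ticket and then validate each buffer; if anything fails mid-loop, ttm_eu_backoff_reservation() unwinds all the locks taken so far. A condensed sketch, assuming the four-argument ttm_eu_reserve_buffers() of the kernels this driver targets and a hypothetical my_validate() step:

    #include <drm/ttm/ttm_execbuf_util.h>

    int my_validate(struct ttm_buffer_object *bo);  /* hypothetical */

    static int my_reserve_all(struct ww_acquire_ctx *ticket,
                              struct list_head *bos, bool intr)
    {
            struct ttm_validate_buffer *entry;
            int ret;

            /* locks every bo on the list under one ww ticket */
            ret = ttm_eu_reserve_buffers(ticket, bos, intr, NULL);
            if (ret)
                    return ret;

            list_for_each_entry(entry, bos, head) {
                    ret = my_validate(entry->bo);
                    if (ret) {
                            /* unwind: drop every lock taken above */
                            ttm_eu_backoff_reservation(ticket, bos);
                            return ret;
                    }
            }
            return 0;
    }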
241 void qxl_release_backoff_reserve_list(struct qxl_release *release)
243 /* if only one object on the release it's the release itself
245 if (list_is_singular(&release->bos))
248 ttm_eu_backoff_reservation(&release->ticket, &release->bos);
254 struct qxl_release **release)
261 /* stash the release after the create command */
262 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
267 (*release)->release_bo = bo;
268 (*release)->release_offset = create_rel->release_offset + 64;
270 qxl_release_list_add(*release, bo);
272 info = qxl_release_map(qdev, *release);
274 qxl_release_unmap(qdev, *release, info);
279 QXL_RELEASE_SURFACE_CMD, release, NULL);
283 int type, struct qxl_release **release,
308 idr_ret = qxl_release_alloc(qdev, type, release);
330 qxl_release_free(qdev, *release);
337 (*release)->release_bo = bo;
338 (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
350 ret = qxl_release_list_add(*release, bo);
353 qxl_release_free(qdev, *release);
357 info = qxl_release_map(qdev, *release);
359 qxl_release_unmap(qdev, *release, info);
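Line 338 is the sub-allocation step: releases of a given type are carved out of a shared buffer object as fixed-size slots, so the byte offset is the current slot index times the per-slot size (line 268 is the surface-command variant, which instead parks the release 64 bytes past the create command in the same bo). A self-contained demonstration of the slot arithmetic, with made-up sizes:

    #include <stdio.h>

    int main(void)
    {
            unsigned int slot_size = 96;    /* hypothetical bytes per release */
            unsigned int idx;

            for (idx = 0; idx < 4; idx++)
                    printf("slot %u -> byte offset %u\n", idx, idx * slot_size);
            return 0;
    }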
367 struct qxl_release *release;
370 release = idr_find(&qdev->release_idr, id);
372 if (!release) {
377 return release;
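Lines 367-377 are the reverse lookup: the handle stashed in a command is resolved back to the release with idr_find(), which returns NULL for an unknown or already-removed id. A sketch reusing the hypothetical my_dev from the allocation sketch above; as the _locked suffix in the original suggests, the caller is expected to hold the idr lock:

    #include <linux/idr.h>

    /* caller holds dev->idr_lock, mirroring the _locked convention */
    static struct my_obj *my_obj_from_id_locked(struct my_dev *dev, int id)
    {
            return idr_find(&dev->obj_idr, id);     /* NULL if id unknown */
    }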
381 struct qxl_release *release)
385 struct qxl_bo *bo = release->release_bo;
387 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
390 info = ptr + (release->release_offset & ~PAGE_MASK);
395 struct qxl_release *release,
398 struct qxl_bo *bo = release->release_bo;
401 ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
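Lines 387-401 split release_offset with PAGE_MASK: the page-aligned half (offset & PAGE_MASK) selects which page of the bo to map atomically, and the intra-page half (offset & ~PAGE_MASK) is added to, or on unmap subtracted from, the mapped pointer. A self-contained demo of that arithmetic, with PAGE_MASK defined locally the way the kernel defines it:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long offset = 3 * PAGE_SIZE + 200;     /* hypothetical */

            printf("page-aligned base: %lu\n", offset & PAGE_MASK);  /* 12288 */
            printf("offset in page:    %lu\n", offset & ~PAGE_MASK); /* 200 */
            return 0;
    }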
405 void qxl_release_fence_buffer_objects(struct qxl_release *release)
412 /* if only one object on the release it's the release itself
414 if (list_is_singular(&release->bos) || list_empty(&release->bos))
417 bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
425 dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
426 release->id | 0xf0000000, release->base.seqno);
427 trace_dma_fence_emit(&release->base);
429 list_for_each_entry(entry, &release->bos, head) {
432 dma_resv_add_fence(bo->base.resv, &release->base,
437 ww_acquire_fini(&release->ticket);
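Lines 405-437 publish the fence: dma_fence_init() stamps it with a context derived from the release id and the per-device seqno, the fence is attached to every reserved buffer's reservation object with dma_resv_add_fence(), and the ww ticket is retired once the locks have served their purpose. A sketch with hypothetical my_fence_ops and container types; note that dma_resv_add_fence() requires the reservation lock, which the earlier reserve step still holds here:

    #include <linux/dma-fence.h>
    #include <linux/dma-resv.h>
    #include <drm/ttm/ttm_execbuf_util.h>

    extern const struct dma_fence_ops my_fence_ops; /* hypothetical */

    struct my_obj {
            struct dma_fence base;
            struct list_head bos;           /* ttm_validate_buffer list */
            struct ww_acquire_ctx ticket;
    };

    static void my_publish_fence(struct my_obj *obj, spinlock_t *lock,
                                 u64 context, u64 seqno)
    {
            struct ttm_validate_buffer *entry;

            dma_fence_init(&obj->base, &my_fence_ops, lock, context, seqno);

            /* buffers are still reserved, so adding the fence is legal;
             * the usage value here is illustrative */
            list_for_each_entry(entry, &obj->bos, head)
                    dma_resv_add_fence(entry->bo->base.resv, &obj->base,
                                       DMA_RESV_USAGE_READ);

            ww_acquire_fini(&obj->ticket);
    }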