Lines matching refs: qdev (left-hand numbers are the matching source lines)
60 struct qxl_device *qdev;
63 qdev = container_of(fence->lock, struct qxl_device, release_lock);
65 if (!wait_event_timeout(qdev->release_event,
67 (qxl_io_notify_oom(qdev), 0)),
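Note on lines 65-67: the wait condition uses a comma-expression side effect. Each time the waitqueue wakes, the condition is re-evaluated; if the fence is still unsignaled, (qxl_io_notify_oom(qdev), 0) pokes the device and then yields 0, so the wait simply continues. A minimal sketch of the same idiom, with hypothetical names (dev, done, poke_hw):

	/* Sketch of the wake, poke, re-wait idiom, assuming kernel context.
	 * poke_hw() runs on every wakeup where done is still false; the
	 * comma operator discards its result so the condition stays false. */
	if (!wait_event_timeout(dev->my_event,
				(READ_ONCE(dev->done) ||
				 (poke_hw(dev), 0)),
				timeout))
		return 0;	/* timed out */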
84 qxl_release_alloc(struct qxl_device *qdev, int type,
103 spin_lock(&qdev->release_idr_lock);
104 handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
105 release->base.seqno = ++qdev->release_seqno;
106 spin_unlock(&qdev->release_idr_lock);
137 qxl_release_free(struct qxl_device *qdev,
143 qxl_surface_id_dealloc(qdev, release->surface_release_id);
145 spin_lock(&qdev->release_idr_lock);
146 idr_remove(&qdev->release_idr, release->id);
147 spin_unlock(&qdev->release_idr_lock);
159 atomic_dec(&qdev->release_count);
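Note on lines 103-106 and 145-147: these bracket a release's id lifetime. idr_alloc runs under release_idr_lock with GFP_NOWAIT (a spinlock is held, so the allocator must not sleep); start 1 keeps id 0 invalid, and end 0 means no upper bound. The per-device release_seqno is bumped inside the same critical section, so fence sequence numbers stay monotonic with id allocation order. qxl_release_free drops the id under the same lock. A condensed sketch of the pairing, with placeholder names (dev, obj):

	/* allocation side: non-sleeping, ids handed out from 1 upward */
	spin_lock(&dev->idr_lock);
	id = idr_alloc(&dev->idr, obj, 1, 0, GFP_NOWAIT);
	spin_unlock(&dev->idr_lock);
	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */

	/* teardown side: same lock, then drop the outstanding count */
	spin_lock(&dev->idr_lock);
	idr_remove(&dev->idr, id);
	spin_unlock(&dev->idr_lock);
	atomic_dec(&dev->obj_count);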
162 static int qxl_release_bo_alloc(struct qxl_device *qdev,
167 return qxl_bo_create(qdev, PAGE_SIZE, false, true,
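Note on lines 162-167: qxl_release_bo_alloc backs each batch of releases with a single PAGE_SIZE buffer object, which qxl_alloc_release_reserved below slices into fixed-size slots. The two sizing tables are not shown in this listing; presumably they satisfy:

	/* assumed invariant, not visible in the listing:
	 *   releases_per_bo[i] * release_size_per_bo[i] <= PAGE_SIZE
	 * so every slot handed out below stays inside the one page
	 * allocated here */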
251 int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
262 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
272 info = qxl_release_map(qdev, *release);
274 qxl_release_unmap(qdev, *release, info);
278 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
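Note on lines 251-278: qxl_alloc_surface_release_reserved allocates an id for a QXL_RELEASE_SURFACE_CMD release, maps the release's info page to publish data to the device, and unmaps it; when there is no existing create-surface release to reuse, it falls through to the generic qxl_alloc_release_reserved at line 278. A sketch of the map/write/unmap step, where the store between lines 272 and 274 is my assumption about the elided line:

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;	/* assumed: publish the handle to the device */
	qxl_release_unmap(qdev, *release, info);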
282 int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
308 idr_ret = qxl_release_alloc(qdev, type, release);
314 atomic_inc(&qdev->release_count);
316 mutex_lock(&qdev->release_mutex);
317 if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
318 free_bo = qdev->current_release_bo[cur_idx];
319 qdev->current_release_bo_offset[cur_idx] = 0;
320 qdev->current_release_bo[cur_idx] = NULL;
322 if (!qdev->current_release_bo[cur_idx]) {
323 ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
325 mutex_unlock(&qdev->release_mutex);
330 qxl_release_free(qdev, *release);
335 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
338 (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
339 qdev->current_release_bo_offset[cur_idx]++;
344 mutex_unlock(&qdev->release_mutex);
353 qxl_release_free(qdev, *release);
357 info = qxl_release_map(qdev, *release);
359 qxl_release_unmap(qdev, *release, info);
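Note on lines 308-359: qxl_alloc_release_reserved is a bump allocator over the current release BO, serialized by release_mutex. When the next slot would overflow (line 317), the full BO is retired and a fresh one is created (line 323); otherwise the release takes slot-index times slot-size as its byte offset (line 338) and the cursor advances. The following stitches the listed fragments into one flow; the control structure between them is my reconstruction, and free_bo cleanup is trimmed:

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;	/* BO full: retire it */
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			qxl_release_free(qdev, *release);
			return ret;
		}
	}
	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] *
				     release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;
	mutex_unlock(&qdev->release_mutex);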
364 struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
369 spin_lock(&qdev->release_idr_lock);
370 release = idr_find(&qdev->release_idr, id);
371 spin_unlock(&qdev->release_idr_lock);
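Note on lines 364-371: qxl_release_from_id_locked resolves an id back to its release. The spinlock only protects the table walk and is dropped before the pointer is used, so the caller must otherwise guarantee the release stays alive. Lookup sketch with placeholder names:

	spin_lock(&dev->idr_lock);
	obj = idr_find(&dev->idr, id);
	spin_unlock(&dev->idr_lock);
	if (!obj)
		return NULL;	/* id never allocated, or already removed */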
380 union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
387 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
394 void qxl_release_unmap(struct qxl_device *qdev,
402 qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
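Note on lines 380-402: qxl_release_map maps only the single page that holds the release's info union. release_offset & PAGE_MASK selects the page-aligned base passed to qxl_bo_kmap_atomic_page; the within-page remainder, release_offset & ~PAGE_MASK, locates the struct. The addition below is my reconstruction of the line elided after 387; unmap presumably inverts it by subtracting the remainder from info before calling qxl_bo_kunmap_atomic_page at line 402:

	ptr  = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
	info = ptr + (release->release_offset & ~PAGE_MASK);
	/* caller writes into *info, then unmaps via the page base ptr */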
410 struct qxl_device *qdev;
419 qdev = container_of(bdev, struct qxl_device, mman.bdev);
425 dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
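Note on line 425: dma_fence_init hands the fence a pointer to the device-wide release_lock as its spinlock. That is what makes the container_of at line 63 work: given any qxl fence, the wait path recovers the owning device from fence->lock. Round-trip sketch; the context and seqno arguments are elided in the listing, so the names here are placeholders:

	/* init: fence->lock points into the device struct */
	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		       context, seqno);

	/* wait path: walk back from the lock to the device */
	qdev = container_of(fence->lock, struct qxl_device, release_lock);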