Lines matching defs:qdev
drivers/gpu/drm/qxl/qxl_release.c

 60: struct qxl_device *qdev;
 66: qdev = container_of(fence->lock, struct qxl_device, release_lock);
 76: qxl_io_notify_oom(qdev);
 79: if (!qxl_queue_garbage_collect(qdev, true))
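
The wait path above never stores a back-pointer to the device: because dma_fence_init() (line 455 below) passes &qdev->release_lock as the fence's lock, line 66 can recover the owning qxl_device from fence->lock with container_of(). A minimal sketch of the pattern, assuming only the two fields visible in the listing (the callback name is illustrative):

#include <linux/container_of.h>
#include <linux/dma-fence.h>
#include <linux/spinlock.h>

struct qxl_device {
	/* ...other members elided... */
	spinlock_t release_lock;	/* doubles as the fence lock */
};

static void example_fence_cb(struct dma_fence *fence)
{
	/* walk back from the embedded spinlock to its container */
	struct qxl_device *qdev =
		container_of(fence->lock, struct qxl_device, release_lock);

	(void)qdev;	/* now usable, e.g. for qxl_io_notify_oom(qdev) */
}

This avoids widening struct dma_fence with a driver-private back-pointer: any code that can see the fence can find the device.
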
124: qxl_release_alloc(struct qxl_device *qdev, int type,
143: spin_lock(&qdev->release_idr_lock);
144: handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
145: release->base.seqno = ++qdev->release_seqno;
146: spin_unlock(&qdev->release_idr_lock);

177: qxl_release_free(struct qxl_device *qdev,
183: qxl_surface_id_dealloc(qdev, release->surface_release_id);
185: spin_lock(&qdev->release_idr_lock);
186: idr_remove(&qdev->release_idr, release->id);
187: spin_unlock(&qdev->release_idr_lock);
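
Lines 143-146 and 185-187 are the two ends of the handle lifecycle: idr_alloc() hands each release a small integer id and idr_remove() returns it, both under release_idr_lock; lookups (qxl_release_from_id_locked(), line 394 below) use idr_find() under the same lock. GFP_NOWAIT is required at line 144 because sleeping allocations are forbidden while a spinlock is held. A self-contained sketch of the pattern with illustrative names:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(obj_idr_lock);
static DEFINE_IDR(obj_idr);

static int obj_handle_alloc(void *obj)
{
	int handle;

	spin_lock(&obj_idr_lock);
	/* GFP_NOWAIT: no sleeping allocation under the spinlock */
	handle = idr_alloc(&obj_idr, obj, 1, 0, GFP_NOWAIT);
	spin_unlock(&obj_idr_lock);

	return handle;	/* >= 1 on success, negative errno on failure */
}

static void *obj_handle_lookup(int handle)
{
	void *obj;

	spin_lock(&obj_idr_lock);
	obj = idr_find(&obj_idr, handle);
	spin_unlock(&obj_idr_lock);

	return obj;
}

static void obj_handle_free(int handle)
{
	spin_lock(&obj_idr_lock);
	idr_remove(&obj_idr, handle);
	spin_unlock(&obj_idr_lock);
}
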
201: static int qxl_release_bo_alloc(struct qxl_device *qdev,
206: return qxl_bo_create(qdev, PAGE_SIZE, false, true,

290: int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
301: idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
311: info = qxl_release_map(qdev, *release);
313: qxl_release_unmap(qdev, *release, info);
317: return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
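
Lines 311-313 show the caller-side convention for command releases: map the release's slot, fill in the command header, unmap. A sketch of what sits between the two calls, assuming (as the QXL device headers define it) that union qxl_release_info publishes the handle through its id field so the device can pass it back when the command is released:

union qxl_release_info *info;

info = qxl_release_map(qdev, *release);
info->id = idr_ret;	/* device echoes this id back when done */
qxl_release_unmap(qdev, *release, info);
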
321: int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
347: idr_ret = qxl_release_alloc(qdev, type, release);
354: mutex_lock(&qdev->release_mutex);
355: if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
356: qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
357: qdev->current_release_bo_offset[cur_idx] = 0;
358: qdev->current_release_bo[cur_idx] = NULL;
360: if (!qdev->current_release_bo[cur_idx]) {
361: ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
363: mutex_unlock(&qdev->release_mutex);
364: qxl_release_free(qdev, *release);
369: bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
372: (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
373: qdev->current_release_bo_offset[cur_idx]++;
378: mutex_unlock(&qdev->release_mutex);
383: qxl_release_free(qdev, *release);
387: info = qxl_release_map(qdev, *release);
389: qxl_release_unmap(qdev, *release, info);
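
qxl_alloc_release_reserved() (lines 321-389) is a bump suballocator: under release_mutex it retires the current backing BO when the slot counter reaches releases_per_bo[cur_idx] (note the test at line 355 retires it one slot early), allocates a fresh page-sized BO on demand via qxl_release_bo_alloc() (line 201 above), and computes each slot as counter * release_size_per_bo[cur_idx]. A generic sketch of the scheme with illustrative names; it substitutes a refcounted kernel page for the driver's refcounted BO (qxl_bo_unref()/qxl_bo_ref() at lines 356 and 369 play the same roles, keeping a retired BO alive while its slots are outstanding):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mutex.h>

#define SLOT_SIZE	64U	/* stand-in for release_size_per_bo[] */
#define SLOTS_PER_PAGE	(PAGE_SIZE / SLOT_SIZE)

struct slot_pool {
	struct mutex lock;
	struct page *page;	/* current backing page, NULL when retired */
	unsigned int offset;	/* next free slot index */
};

/*
 * Reserve one slot. *pagep receives a reference the caller must
 * put_page() when the slot is no longer needed (the qxl_bo_ref()
 * analog). Returns the slot's byte offset or a negative errno.
 */
static int slot_reserve(struct slot_pool *pool, struct page **pagep)
{
	int ret;

	mutex_lock(&pool->lock);
	if (pool->page && pool->offset + 1 >= SLOTS_PER_PAGE) {
		/* page is full: drop the pool's reference, retire it */
		put_page(pool->page);
		pool->page = NULL;
		pool->offset = 0;
	}
	if (!pool->page) {
		pool->page = alloc_page(GFP_KERNEL);
		if (!pool->page) {
			mutex_unlock(&pool->lock);
			return -ENOMEM;
		}
	}
	get_page(pool->page);	/* reference handed to the caller */
	*pagep = pool->page;
	ret = pool->offset * SLOT_SIZE;
	pool->offset++;
	mutex_unlock(&pool->lock);

	return ret;
}
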
394: struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
399: spin_lock(&qdev->release_idr_lock);
400: release = idr_find(&qdev->release_idr, id);
401: spin_unlock(&qdev->release_idr_lock);

410: union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
417: ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);

424: void qxl_release_unmap(struct qxl_device *qdev,
432: qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
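
qxl_release_map() (lines 410-417) maps only the single page that holds the release's slot: release_offset & PAGE_MASK gives the page-aligned base handed to qxl_bo_kmap_atomic_page(), after which the in-page remainder is added back to reach the slot itself. A sketch of the same arithmetic over an ordinary page array, with kmap_local_page() standing in for the driver's BO-specific atomic kmap helper:

#include <linux/highmem.h>
#include <linux/mm.h>

static void *slot_map(struct page *pages[], unsigned long offset)
{
	/* offset >> PAGE_SHIFT == (offset & PAGE_MASK) / PAGE_SIZE */
	void *base = kmap_local_page(pages[offset >> PAGE_SHIFT]);

	/* add back the remainder to land on the slot in that page */
	return base + (offset & ~PAGE_MASK);
}

static void slot_unmap(void *ptr)
{
	kunmap_local(ptr);	/* accepts any address in the mapping */
}
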
440: struct qxl_device *qdev;
449: qdev = container_of(bdev, struct qxl_device, mman.bdev);
455: dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
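
This is the other half of the container_of() trick at line 66: dma_fence_init() records &qdev->release_lock as fence->lock, and the seqno taken under release_idr_lock at line 145 becomes the fence's sequence number. A sketch of the call, with the qxl types and qxl_fence_ops as in the listing (the context argument is shown as a literal 0 here; the real driver computes its own fence context, and qdev is itself recovered from the TTM device via container_of() at line 449):

#include <linux/dma-fence.h>

static void release_fence_init(struct qxl_device *qdev,
			       struct qxl_release *release)
{
	/*
	 * Publishing &qdev->release_lock as the fence lock is what
	 * makes container_of(fence->lock, ...) work later (line 66).
	 */
	dma_fence_init(&release->base, &qxl_fence_ops,
		       &qdev->release_lock, 0 /* context */,
		       release->base.seqno);
}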