Lines matching defs:vgdev (each entry is the source line number followed by the matching line; an illustrative sketch of the allocate/fill/queue pattern used by these command emitters follows the listing)
59 struct virtio_gpu_device *vgdev = dev->dev_private;
61 schedule_work(&vgdev->ctrlq.dequeue_work);
67 struct virtio_gpu_device *vgdev = dev->dev_private;
69 schedule_work(&vgdev->cursorq.dequeue_work);
72 int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
74 vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
78 if (!vgdev->vbufs)
83 void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
85 kmem_cache_destroy(vgdev->vbufs);
86 vgdev->vbufs = NULL;
90 virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
96 vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
124 virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
130 (vgdev, sizeof(struct virtio_gpu_update_cursor),
140 static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
148 vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
154 static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
158 return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
163 static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
168 return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
173 static void free_vbuf(struct virtio_gpu_device *vgdev,
179 kmem_cache_free(vgdev->vbufs, vbuf);
198 struct virtio_gpu_device *vgdev =
207 spin_lock(&vgdev->ctrlq.qlock);
209 virtqueue_disable_cb(vgdev->ctrlq.vq);
210 reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
212 } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
213 spin_unlock(&vgdev->ctrlq.qlock);
218 trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp, entry->seqno);
232 virtio_gpu_fence_event_process(vgdev, fence_id);
235 entry->resp_cb(vgdev, entry);
237 wake_up(&vgdev->ctrlq.ack_queue);
241 virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
243 free_vbuf(vgdev, entry);
249 struct virtio_gpu_device *vgdev =
256 spin_lock(&vgdev->cursorq.qlock);
258 virtqueue_disable_cb(vgdev->cursorq.vq);
259 reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
260 } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
261 spin_unlock(&vgdev->cursorq.qlock);
267 trace_virtio_gpu_cmd_response(vgdev->cursorq.vq, resp, entry->seqno);
269 free_vbuf(vgdev, entry);
271 wake_up(&vgdev->cursorq.ack_queue);
314 static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
322 struct virtqueue *vq = vgdev->ctrlq.vq;
325 if (!drm_dev_enter(vgdev->ddev, &idx)) {
328 free_vbuf(vgdev, vbuf);
332 if (vgdev->has_indirect)
336 spin_lock(&vgdev->ctrlq.qlock);
339 spin_unlock(&vgdev->ctrlq.qlock);
340 virtio_gpu_notify(vgdev);
341 wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
349 virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
360 vbuf->seqno = ++vgdev->ctrlq.seqno;
363 atomic_inc(&vgdev->pending_commands);
365 spin_unlock(&vgdev->ctrlq.qlock);
371 static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
415 ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
425 void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
429 if (!atomic_read(&vgdev->pending_commands))
432 spin_lock(&vgdev->ctrlq.qlock);
433 atomic_set(&vgdev->pending_commands, 0);
434 notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
435 spin_unlock(&vgdev->ctrlq.qlock);
438 virtqueue_notify(vgdev->ctrlq.vq);
441 static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
444 return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
447 static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
450 struct virtqueue *vq = vgdev->cursorq.vq;
455 if (!drm_dev_enter(vgdev->ddev, &idx)) {
456 free_vbuf(vgdev, vbuf);
464 spin_lock(&vgdev->cursorq.qlock);
468 spin_unlock(&vgdev->cursorq.qlock);
469 wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
470 spin_lock(&vgdev->cursorq.qlock);
473 vbuf->seqno = ++vgdev->cursorq.seqno;
481 spin_unlock(&vgdev->cursorq.qlock);
494 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
503 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
513 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
517 static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
528 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
535 cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
543 ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
548 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
556 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
567 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
570 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
580 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
591 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
594 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
604 bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
607 dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
610 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
622 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
626 virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
635 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
645 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
648 static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
655 spin_lock(&vgdev->display_info_lock);
656 for (i = 0; i < vgdev->num_scanouts; i++) {
657 vgdev->outputs[i].info = resp->pmodes[i];
669 vgdev->display_info_pending = false;
670 spin_unlock(&vgdev->display_info_lock);
671 wake_up(&vgdev->resp_wq);
673 if (!drm_helper_hpd_irq_event(vgdev->ddev))
674 drm_kms_helper_hotplug_event(vgdev->ddev);
677 static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
686 spin_lock(&vgdev->display_info_lock);
687 if (vgdev->capsets) {
688 vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
689 vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
690 vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
694 spin_unlock(&vgdev->display_info_lock);
695 wake_up(&vgdev->resp_wq);
698 static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
707 spin_lock(&vgdev->display_info_lock);
708 list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
719 spin_unlock(&vgdev->display_info_lock);
720 wake_up_all(&vgdev->resp_wq);
735 static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
746 if (scanout >= vgdev->num_scanouts)
748 output = vgdev->outputs + scanout;
753 spin_lock(&vgdev->display_info_lock);
756 spin_unlock(&vgdev->display_info_lock);
759 wake_up(&vgdev->resp_wq);
762 int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
774 (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
779 vgdev->display_info_pending = true;
781 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
785 int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
797 (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
804 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
808 int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
821 if (idx >= vgdev->num_capsets)
824 if (version > vgdev->capsets[idx].max_version)
831 max_size = vgdev->capsets[idx].max_size;
847 cache_ent->id = vgdev->capsets[idx].id;
850 spin_lock(&vgdev->display_info_lock);
852 list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
853 if (search_ent->id == vgdev->capsets[idx].id &&
860 list_add_tail(&cache_ent->head, &vgdev->cap_cache);
861 spin_unlock(&vgdev->display_info_lock);
872 (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
876 cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
879 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
884 int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
891 if (WARN_ON(!vgdev->has_edid))
894 for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
901 (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
906 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
912 void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
919 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
927 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
930 void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
936 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
941 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
944 void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
952 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
959 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
962 void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
970 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
977 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
981 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
990 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1008 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1013 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
1025 bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
1028 dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
1031 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1045 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1048 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
1061 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1075 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1078 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
1087 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1098 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1101 void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
1106 virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
1110 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
1117 cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
1119 virtio_gpu_queue_cursor(vgdev, vbuf);
1122 static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
1131 spin_lock(&vgdev->resource_export_lock);
1141 spin_unlock(&vgdev->resource_export_lock);
1143 wake_up_all(&vgdev->resp_wq);
1147 virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
1157 spin_lock(&vgdev->resource_export_lock);
1159 spin_unlock(&vgdev->resource_export_lock);
1165 (vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
1173 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1177 static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
1187 spin_lock(&vgdev->host_visible_lock);
1196 spin_unlock(&vgdev->host_visible_lock);
1197 wake_up_all(&vgdev->resp_wq);
1200 int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
1213 (vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
1222 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1226 void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
1232 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1238 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1242 virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
1251 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1266 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1270 void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
1282 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1303 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
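
For orientation, below is a minimal sketch of the allocate/fill/queue pattern the virtio_gpu_cmd_*() emitters in the listing follow: virtio_gpu_alloc_cmd() obtains a vbuffer with a command area, the caller fills the little-endian command, and virtio_gpu_queue_ctrl_buffer() queues it on the control virtqueue. The command struct, its fields, VIRTIO_GPU_CMD_EXAMPLE and the function name are hypothetical placeholders, and the sketch assumes it sits in the same file as the static helpers it calls; it is not actual driver code.

/*
 * Illustrative sketch only: a hypothetical emitter mirroring the pattern
 * of the listed virtio_gpu_cmd_*() functions. The struct, its fields and
 * VIRTIO_GPU_CMD_EXAMPLE are placeholders, not real virtio-gpu ABI.
 */
struct virtio_gpu_example_cmd {
	struct virtio_gpu_ctrl_hdr hdr;		/* common command header */
	__le32 resource_id;			/* placeholder payload */
};

static void virtio_gpu_cmd_example(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_example_cmd *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	/* Allocate a vbuffer whose command area is sizeof(*cmd_p) bytes. */
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	/* Device-visible fields are little-endian. */
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_EXAMPLE);	/* placeholder type */
	cmd_p->resource_id = cpu_to_le32(resource_id);

	/* Queue on the control virtqueue; this helper passes a NULL fence. */
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

Commands queued this way reach the host once virtio_gpu_notify() kicks the control virtqueue (lines 425-438 above); emitters that take a struct virtio_gpu_fence * use virtio_gpu_queue_fenced_ctrl_buffer() instead, which hands the fence down to virtio_gpu_queue_ctrl_sgs() so it is emitted before submission (lines 349 and 415).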