Lines Matching defs:vgdev

57 struct virtio_gpu_device *vgdev = dev->dev_private;
59 schedule_work(&vgdev->ctrlq.dequeue_work);
65 struct virtio_gpu_device *vgdev = dev->dev_private;
67 schedule_work(&vgdev->cursorq.dequeue_work);
70 int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
72 vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
76 if (!vgdev->vbufs)
81 void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
83 kmem_cache_destroy(vgdev->vbufs);
84 vgdev->vbufs = NULL;
88 virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
94 vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
122 virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
128 (vgdev, sizeof(struct virtio_gpu_update_cursor),
138 static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
146 vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
152 static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
156 return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
161 static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
166 return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
171 static void free_vbuf(struct virtio_gpu_device *vgdev,
177 kmem_cache_free(vgdev->vbufs, vbuf);
196 struct virtio_gpu_device *vgdev =
205 spin_lock(&vgdev->ctrlq.qlock);
207 virtqueue_disable_cb(vgdev->ctrlq.vq);
208 reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
210 } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
211 spin_unlock(&vgdev->ctrlq.qlock);
216 trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
239 entry->resp_cb(vgdev, entry);
241 wake_up(&vgdev->ctrlq.ack_queue);
244 virtio_gpu_fence_event_process(vgdev, fence_id);
248 virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
250 free_vbuf(vgdev, entry);
256 struct virtio_gpu_device *vgdev =
263 spin_lock(&vgdev->cursorq.qlock);
265 virtqueue_disable_cb(vgdev->cursorq.vq);
266 reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
267 } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
268 spin_unlock(&vgdev->cursorq.qlock);
272 free_vbuf(vgdev, entry);
274 wake_up(&vgdev->cursorq.ack_queue);
317 static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
325 struct virtqueue *vq = vgdev->ctrlq.vq;
328 if (!drm_dev_enter(vgdev->ddev, &idx)) {
331 free_vbuf(vgdev, vbuf);
335 if (vgdev->has_indirect)
339 spin_lock(&vgdev->ctrlq.qlock);
342 spin_unlock(&vgdev->ctrlq.qlock);
343 virtio_gpu_notify(vgdev);
344 wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
352 virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
365 atomic_inc(&vgdev->pending_commands);
367 spin_unlock(&vgdev->ctrlq.qlock);
373 static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
417 ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
427 void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
431 if (!atomic_read(&vgdev->pending_commands))
434 spin_lock(&vgdev->ctrlq.qlock);
435 atomic_set(&vgdev->pending_commands, 0);
436 notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
437 spin_unlock(&vgdev->ctrlq.qlock);
440 virtqueue_notify(vgdev->ctrlq.vq);
443 static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
446 return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
449 static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
452 struct virtqueue *vq = vgdev->cursorq.vq;
457 if (!drm_dev_enter(vgdev->ddev, &idx)) {
458 free_vbuf(vgdev, vbuf);
466 spin_lock(&vgdev->cursorq.qlock);
470 spin_unlock(&vgdev->cursorq.qlock);
471 wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
472 spin_lock(&vgdev->cursorq.qlock);
481 spin_unlock(&vgdev->cursorq.qlock);
494 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
503 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
513 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
517 static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
528 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
535 cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
543 ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
548 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
556 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
567 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
570 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
578 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
588 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
591 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
601 bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
605 dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
608 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
620 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
624 virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
633 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
643 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
646 static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
653 spin_lock(&vgdev->display_info_lock);
654 for (i = 0; i < vgdev->num_scanouts; i++) {
655 vgdev->outputs[i].info = resp->pmodes[i];
667 vgdev->display_info_pending = false;
668 spin_unlock(&vgdev->display_info_lock);
669 wake_up(&vgdev->resp_wq);
671 if (!drm_helper_hpd_irq_event(vgdev->ddev))
672 drm_kms_helper_hotplug_event(vgdev->ddev);
675 static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
684 spin_lock(&vgdev->display_info_lock);
685 if (vgdev->capsets) {
686 vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
687 vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
688 vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
692 spin_unlock(&vgdev->display_info_lock);
693 wake_up(&vgdev->resp_wq);
696 static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
705 spin_lock(&vgdev->display_info_lock);
706 list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
717 spin_unlock(&vgdev->display_info_lock);
718 wake_up_all(&vgdev->resp_wq);
733 static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
744 if (scanout >= vgdev->num_scanouts)
746 output = vgdev->outputs + scanout;
751 spin_lock(&vgdev->display_info_lock);
754 spin_unlock(&vgdev->display_info_lock);
757 wake_up(&vgdev->resp_wq);
760 int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
772 (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
777 vgdev->display_info_pending = true;
779 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
783 int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
795 (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
802 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
806 int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
819 if (idx >= vgdev->num_capsets)
822 if (version > vgdev->capsets[idx].max_version)
829 max_size = vgdev->capsets[idx].max_size;
845 cache_ent->id = vgdev->capsets[idx].id;
848 spin_lock(&vgdev->display_info_lock);
850 list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
851 if (search_ent->id == vgdev->capsets[idx].id &&
858 list_add_tail(&cache_ent->head, &vgdev->cap_cache);
859 spin_unlock(&vgdev->display_info_lock);
870 (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
874 cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
877 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
882 int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
889 if (WARN_ON(!vgdev->has_edid))
892 for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
899 (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
904 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
910 void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
916 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
924 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
927 void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
933 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
938 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
941 void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
949 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
956 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
959 void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
967 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
974 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
978 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
987 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1005 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1010 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
1020 bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
1024 dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
1027 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1039 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1042 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
1053 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1065 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1068 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
1077 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1088 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1091 void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
1096 virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
1100 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
1107 cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
1109 virtio_gpu_queue_cursor(vgdev, vbuf);
1112 static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
1121 spin_lock(&vgdev->resource_export_lock);
1131 spin_unlock(&vgdev->resource_export_lock);
1133 wake_up_all(&vgdev->resp_wq);
1137 virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
1147 spin_lock(&vgdev->resource_export_lock);
1149 spin_unlock(&vgdev->resource_export_lock);
1155 (vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
1163 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
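
Nearly all of the matched call sites above follow the same submission pattern: a typed command struct is carved out of the vgdev->vbufs slab with virtio_gpu_alloc_cmd(), its header fields are filled in, and the buffer is handed to virtio_gpu_queue_ctrl_buffer() (unfenced) or virtio_gpu_queue_fenced_ctrl_buffer() (fenced), with the actual virtqueue kick batched through virtio_gpu_notify(). The sketch below only illustrates that shape; struct virtio_gpu_example_cmd, its resource_id field, and VIRTIO_GPU_CMD_EXAMPLE are hypothetical stand-ins, while the helper names and signatures are taken as they appear in the matched lines.

/*
 * Sketch only: the alloc -> fill -> queue pattern shared by the matched
 * command emitters.  The command struct and type constant here are
 * hypothetical; the helpers are the ones listed above.
 */
static void virtio_gpu_cmd_example(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_example_cmd *cmd_p;	/* hypothetical command layout */
	struct virtio_gpu_vbuffer *vbuf;

	/* Allocate a command slot from vgdev->vbufs (cf. lines 70-94, 152-156). */
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	/* Device-visible fields are little-endian. */
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_EXAMPLE);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	if (fence)
		/* Fenced path: the fence is emitted while queueing the sgs. */
		virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	else
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	/*
	 * Kicks are batched: queueing only marks commands pending, and
	 * virtio_gpu_notify() performs the actual virtqueue kick (lines
	 * 427-440), typically once per batch from the caller.
	 */
	virtio_gpu_notify(vgdev);
}

Fenced emitters in the listing (transfer_to_host_2d/3d, resource_create_3d, submit at lines 591-1088) take the fenced path, while query commands that expect a device reply (display info, capset info, capsets, EDID at lines 646-904) instead allocate with virtio_gpu_alloc_cmd_cb()/virtio_gpu_alloc_cmd_resp() so the ctrlq dequeue worker can run resp_cb and wake resp_wq when the response arrives.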