Lines Matching defs:vsock
3 * virtio transport for vsock
10 * early virtio-vsock proof-of-concept bits.
70 struct virtio_vsock *vsock;
74 vsock = rcu_dereference(the_virtio_vsock);
75 if (!vsock) {
80 ret = vsock->guest_cid;
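The fragment above is the get-local-CID path: the driver-wide the_virtio_vsock pointer is read under RCU so that device removal can proceed concurrently with readers. A minimal sketch of that access pattern, with an illustrative example_ name and assuming the usual VMADDR_CID_ANY fallback when no device is bound:

    static u32 example_get_local_cid(void)
    {
            struct virtio_vsock *vsock;
            u32 ret;

            rcu_read_lock();
            vsock = rcu_dereference(the_virtio_vsock);
            if (!vsock) {
                    /* No virtio-vsock device bound yet. */
                    ret = VMADDR_CID_ANY;
                    goto out_rcu;
            }
            ret = vsock->guest_cid;
    out_rcu:
            rcu_read_unlock();
            return ret;
    }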
89 struct virtio_vsock *vsock =
95 mutex_lock(&vsock->tx_lock);
97 if (!vsock->tx_run)
100 vq = vsock->vqs[VSOCK_VQ_TX];
108 skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
127 virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
132 struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
135 val = atomic_dec_return(&vsock->queued_replies);
149 mutex_unlock(&vsock->tx_lock);
152 queue_work(virtio_vsock_workqueue, &vsock->rx_work);
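These matches outline the TX worker: under tx_lock it bails out unless tx_run is set, drains send_pkt_queue into the TX virtqueue, and puts an skb back at the head of the queue when the ring is full. A simplified sketch of that loop; example_send_pkt_work is an illustrative name, and the real worker also posts the packet header as a separate scatterlist entry and handles the reply accounting visible at lines 132-135 above:

    static void example_send_pkt_work(struct work_struct *work)
    {
            struct virtio_vsock *vsock =
                    container_of(work, struct virtio_vsock, send_pkt_work);
            struct virtqueue *vq;
            bool added = false;

            mutex_lock(&vsock->tx_lock);
            if (!vsock->tx_run)
                    goto out;

            vq = vsock->vqs[VSOCK_VQ_TX];
            for (;;) {
                    struct scatterlist sg;
                    struct sk_buff *skb;

                    skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
                    if (!skb)
                            break;

                    sg_init_one(&sg, skb->data, skb->len);
                    if (virtqueue_add_outbuf(vq, &sg, 1, skb, GFP_KERNEL) < 0) {
                            /* Ring full: requeue and retry after tx_work
                             * reclaims used descriptors.
                             */
                            virtio_vsock_skb_queue_head(&vsock->send_pkt_queue,
                                                        skb);
                            break;
                    }
                    added = true;
            }

            if (added)
                    virtqueue_kick(vq);
    out:
            mutex_unlock(&vsock->tx_lock);
    }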
159 struct virtio_vsock *vsock;
165 vsock = rcu_dereference(the_virtio_vsock);
166 if (!vsock) {
172 if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) {
179 atomic_inc(&vsock->queued_replies);
181 virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
182 queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
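The send path itself does no virtqueue I/O: packets addressed to the guest's own CID are looped back locally (the dst_cid == vsock->guest_cid check at line 172), replies bump queued_replies, and everything else is queued for the worker. A sketch of the enqueue tail, with a hypothetical is_reply parameter standing in for the header inspection the real code performs:

    static void example_send_pkt_tail(struct virtio_vsock *vsock,
                                      struct sk_buff *skb, bool is_reply)
    {
            if (is_reply)
                    atomic_inc(&vsock->queued_replies);

            virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
            queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
    }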
192 struct virtio_vsock *vsock;
196 vsock = rcu_dereference(the_virtio_vsock);
197 if (!vsock) {
202 cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);
205 struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
208 new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
211 queue_work(virtio_vsock_workqueue, &vsock->rx_work);
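Cancelling a socket's packets purges them from send_pkt_queue and subtracts the purged count from queued_replies. If that drops the counter back under the RX ring size, the RX worker, which may have throttled itself, is restarted. A sketch of that bookkeeping, assuming the threshold used by virtio_transport_more_replies() below:

    static void example_after_purge(struct virtio_vsock *vsock, int cnt)
    {
            struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
            int new_cnt;

            new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
            /* Restart RX only if the counter just crossed back under
             * the vring size; otherwise RX was never throttled.
             */
            if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
                new_cnt < virtqueue_get_vring_size(rx_vq))
                    queue_work(virtio_vsock_workqueue, &vsock->rx_work);
    }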
221 static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
229 vq = vsock->vqs[VSOCK_VQ_RX];
245 vsock->rx_buf_nr++;
247 if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
248 vsock->rx_buf_max_nr = vsock->rx_buf_nr;
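virtio_vsock_rx_fill() keeps the RX virtqueue stocked with receive buffers, counting them in rx_buf_nr and tracking the high-water mark in rx_buf_max_nr. A simplified sketch using plain alloc_skb(); the real driver allocates through its own virtio-vsock skb helper, with a buffer size such as VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE:

    static void example_rx_fill(struct virtio_vsock *vsock)
    {
            struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
            unsigned int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;

            for (;;) {
                    struct scatterlist sg;
                    struct sk_buff *skb;

                    skb = alloc_skb(buf_len, GFP_KERNEL);
                    if (!skb)
                            break;

                    sg_init_one(&sg, skb->data, buf_len);
                    if (virtqueue_add_inbuf(vq, &sg, 1, skb, GFP_KERNEL) < 0) {
                            kfree_skb(skb);
                            break;  /* ring is full */
                    }
                    vsock->rx_buf_nr++;
            }
            if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
                    vsock->rx_buf_max_nr = vsock->rx_buf_nr;
            virtqueue_kick(vq);
    }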
254 struct virtio_vsock *vsock =
259 vq = vsock->vqs[VSOCK_VQ_TX];
260 mutex_lock(&vsock->tx_lock);
262 if (!vsock->tx_run)
277 mutex_unlock(&vsock->tx_lock);
280 queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
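The TX worker's counterpart reclaims descriptors the device has consumed and then re-kicks send_pkt_work, since freed descriptors may unblock a sender that hit a full ring. A sketch; the real loop wraps the drain in virtqueue_disable_cb()/virtqueue_enable_cb() to close the race with the interrupt:

    static void example_tx_work(struct work_struct *work)
    {
            struct virtio_vsock *vsock =
                    container_of(work, struct virtio_vsock, tx_work);
            struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];
            struct sk_buff *skb;
            unsigned int len;
            bool added = false;

            mutex_lock(&vsock->tx_lock);
            if (!vsock->tx_run)
                    goto out;

            while ((skb = virtqueue_get_buf(vq, &len))) {
                    consume_skb(skb);
                    added = true;
            }
    out:
            mutex_unlock(&vsock->tx_lock);

            /* Freed descriptors may unblock a sender that saw a full ring. */
            if (added)
                    queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
    }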
284 static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
286 struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
290 val = atomic_read(&vsock->queued_replies);
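This is the RX throttle: the driver allows at most as many pending replies as there are descriptors in the RX ring, so pending replies cannot grow without bound. The check reduces to:

    static bool example_more_replies(struct virtio_vsock *vsock)
    {
            struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
            int val = atomic_read(&vsock->queued_replies);

            /* At most one pending reply per RX descriptor. */
            return val < virtqueue_get_vring_size(vq);
    }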
296 static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
302 vq = vsock->vqs[VSOCK_VQ_EVENT];
310 static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
314 for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
315 struct virtio_vsock_event *event = &vsock->event_list[i];
317 virtio_vsock_event_fill_one(vsock, event);
320 virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
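Event buffers are preallocated in vsock->event_list and posted to the event virtqueue as in-buffers; the device writes an event into one and returns it. A sketch of posting a single buffer:

    static int example_event_fill_one(struct virtio_vsock *vsock,
                                      struct virtio_vsock_event *event)
    {
            struct virtqueue *vq = vsock->vqs[VSOCK_VQ_EVENT];
            struct scatterlist sg;

            sg_init_one(&sg, event, sizeof(*event));
            return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
    }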
335 static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
337 struct virtio_device *vdev = vsock->vdev;
342 vsock->guest_cid = le64_to_cpu(guest_cid);
346 static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
351 virtio_vsock_update_guest_cid(vsock);
360 struct virtio_vsock *vsock =
364 vq = vsock->vqs[VSOCK_VQ_EVENT];
366 mutex_lock(&vsock->event_lock);
368 if (!vsock->event_run)
378 virtio_vsock_event_handle(vsock, event);
380 virtio_vsock_event_fill_one(vsock, event);
384 virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
386 mutex_unlock(&vsock->event_lock);
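The event worker drains returned event buffers, handles each one (a transport reset makes the driver re-read guest_cid from config space, as the virtio_vsock_update_guest_cid() matches above show), and immediately reposts the buffer. A sketch of the loop; the real version also uses virtqueue_disable_cb()/virtqueue_enable_cb():

    static void example_event_work(struct work_struct *work)
    {
            struct virtio_vsock *vsock =
                    container_of(work, struct virtio_vsock, event_work);
            struct virtqueue *vq = vsock->vqs[VSOCK_VQ_EVENT];
            struct virtio_vsock_event *event;
            unsigned int len;

            mutex_lock(&vsock->event_lock);
            if (!vsock->event_run)
                    goto out;

            while ((event = virtqueue_get_buf(vq, &len))) {
                    if (len == sizeof(*event))
                            virtio_vsock_event_handle(vsock, event);
                    /* Give the buffer straight back to the device. */
                    virtio_vsock_event_fill_one(vsock, event);
            }
            virtqueue_kick(vq);
    out:
            mutex_unlock(&vsock->event_lock);
    }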
391 struct virtio_vsock *vsock = vq->vdev->priv;
393 if (!vsock)
395 queue_work(virtio_vsock_workqueue, &vsock->event_work);
400 struct virtio_vsock *vsock = vq->vdev->priv;
402 if (!vsock)
404 queue_work(virtio_vsock_workqueue, &vsock->tx_work);
409 struct virtio_vsock *vsock = vq->vdev->priv;
411 if (!vsock)
413 queue_work(virtio_vsock_workqueue, &vsock->rx_work);
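All three virtqueue callbacks follow the same pattern: they run in interrupt context, so they do nothing but schedule the matching work item on virtio_vsock_workqueue. The RX one, for example:

    static void example_rx_done(struct virtqueue *vq)
    {
            struct virtio_vsock *vsock = vq->vdev->priv;

            if (!vsock)
                    return;
            queue_work(virtio_vsock_workqueue, &vsock->rx_work);
    }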
470 struct virtio_vsock *vsock;
475 vsock = rcu_dereference(the_virtio_vsock);
476 if (vsock)
477 seqpacket_allow = vsock->seqpacket_allow;
485 struct virtio_vsock *vsock =
489 vq = vsock->vqs[VSOCK_VQ_RX];
491 mutex_lock(&vsock->rx_lock);
493 if (!vsock->rx_run)
502 if (!virtio_transport_more_replies(vsock)) {
514 vsock->rx_buf_nr--;
530 if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
531 virtio_vsock_rx_fill(vsock);
532 mutex_unlock(&vsock->rx_lock);
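The RX worker drains used buffers under rx_lock, stops early when virtio_transport_more_replies() says too many replies are pending, and refills the ring once it falls below half of its high-water mark. A simplified sketch; the real loop is wrapped in virtqueue_disable_cb()/virtqueue_enable_cb() and hands each packet to the core transport rather than dropping it:

    static void example_rx_work(struct work_struct *work)
    {
            struct virtio_vsock *vsock =
                    container_of(work, struct virtio_vsock, rx_work);
            struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];

            mutex_lock(&vsock->rx_lock);
            if (!vsock->rx_run)
                    goto out;

            for (;;) {
                    struct sk_buff *skb;
                    unsigned int len;

                    if (!virtio_transport_more_replies(vsock))
                            break;  /* throttle: too many pending replies */

                    skb = virtqueue_get_buf(vq, &len);
                    if (!skb)
                            break;

                    vsock->rx_buf_nr--;
                    /* Deliver to the core transport here; placeholder: */
                    kfree_skb(skb);
            }

            if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
                    virtio_vsock_rx_fill(vsock);
    out:
            mutex_unlock(&vsock->rx_lock);
    }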
535 static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
537 struct virtio_device *vdev = vsock->vdev;
550 ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, callbacks, names,
555 virtio_vsock_update_guest_cid(vsock);
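virtio_vsock_vqs_init() discovers the three virtqueues and then reads the guest CID. The callback and name tables it passes to virtio_find_vqs() pair each queue with its _done handler; a sketch of their likely shape (the handler names and the exact virtio_find_vqs() signature vary across kernel versions):

    static vq_callback_t *example_callbacks[] = {
            [VSOCK_VQ_RX]    = virtio_vsock_rx_done,
            [VSOCK_VQ_TX]    = virtio_vsock_tx_done,
            [VSOCK_VQ_EVENT] = virtio_vsock_event_done,
    };

    static const char * const example_names[] = {
            [VSOCK_VQ_RX]    = "rx",
            [VSOCK_VQ_TX]    = "tx",
            [VSOCK_VQ_EVENT] = "event",
    };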
562 static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
564 mutex_lock(&vsock->tx_lock);
565 vsock->tx_run = true;
566 mutex_unlock(&vsock->tx_lock);
568 mutex_lock(&vsock->rx_lock);
569 virtio_vsock_rx_fill(vsock);
570 vsock->rx_run = true;
571 mutex_unlock(&vsock->rx_lock);
573 mutex_lock(&vsock->event_lock);
574 virtio_vsock_event_fill(vsock);
575 vsock->event_run = true;
576 mutex_unlock(&vsock->event_lock);
580 * vsock->tx_run is set to true. We queue vsock->send_pkt_work
587 queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
590 static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
592 struct virtio_device *vdev = vsock->vdev;
602 mutex_lock(&vsock->rx_lock);
603 vsock->rx_run = false;
604 mutex_unlock(&vsock->rx_lock);
606 mutex_lock(&vsock->tx_lock);
607 vsock->tx_run = false;
608 mutex_unlock(&vsock->tx_lock);
610 mutex_lock(&vsock->event_lock);
611 vsock->event_run = false;
612 mutex_unlock(&vsock->event_lock);
619 mutex_lock(&vsock->rx_lock);
620 while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
622 mutex_unlock(&vsock->rx_lock);
624 mutex_lock(&vsock->tx_lock);
625 while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
627 mutex_unlock(&vsock->tx_lock);
629 virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
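Teardown order matters: the device is reset first so it stops using buffers, then the *_run flags are cleared under the same locks the workers take, so a worker that is already running finishes and later invocations bail out immediately; only then are unused buffers detached and the send queue purged. A condensed sketch, assuming a virtio_reset_device() step elided from the match list (shown for RX only; TX and event follow the same pattern):

    static void example_vqs_del(struct virtio_vsock *vsock)
    {
            struct virtio_device *vdev = vsock->vdev;
            struct sk_buff *skb;

            virtio_reset_device(vdev);      /* device stops touching buffers */

            mutex_lock(&vsock->rx_lock);
            vsock->rx_run = false;          /* tx_run/event_run likewise */
            mutex_unlock(&vsock->rx_lock);

            /* Reclaim buffers still owned by the (now reset) device. */
            while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
                    kfree_skb(skb);

            virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
            vdev->config->del_vqs(vdev);
    }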
637 struct virtio_vsock *vsock = NULL;
644 /* Only one virtio-vsock device per guest is supported */
651 vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
652 if (!vsock) {
657 vsock->vdev = vdev;
659 vsock->rx_buf_nr = 0;
660 vsock->rx_buf_max_nr = 0;
661 atomic_set(&vsock->queued_replies, 0);
663 mutex_init(&vsock->tx_lock);
664 mutex_init(&vsock->rx_lock);
665 mutex_init(&vsock->event_lock);
666 skb_queue_head_init(&vsock->send_pkt_queue);
667 INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
668 INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
669 INIT_WORK(&vsock->event_work, virtio_transport_event_work);
670 INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
673 vsock->seqpacket_allow = true;
675 vdev->priv = vsock;
677 ret = virtio_vsock_vqs_init(vsock);
681 rcu_assign_pointer(the_virtio_vsock, vsock);
682 virtio_vsock_vqs_start(vsock);
689 kfree(vsock);
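Probe publishes the device only after everything is initialized: the rcu_assign_pointer() comes last, so RCU readers such as the get-local-CID and send paths can never observe a half-constructed vsock. A sketch of the tail of the probe path:

    static int example_probe_tail(struct virtio_device *vdev,
                                  struct virtio_vsock *vsock)
    {
            int ret;

            vdev->priv = vsock;

            ret = virtio_vsock_vqs_init(vsock);
            if (ret < 0) {
                    kfree(vsock);
                    return ret;
            }

            /* Publish only a fully initialized device. */
            rcu_assign_pointer(the_virtio_vsock, vsock);
            virtio_vsock_vqs_start(vsock);
            return 0;
    }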
696 struct virtio_vsock *vsock = vdev->priv;
704 virtio_vsock_vqs_del(vsock);
707 * all works before freeing the vsock object to avoid use-after-free.
709 flush_work(&vsock->rx_work);
710 flush_work(&vsock->tx_work);
711 flush_work(&vsock->event_work);
712 flush_work(&vsock->send_pkt_work);
716 kfree(vsock);
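Removal is the mirror image: unpublish the RCU pointer and wait for readers, tear down the virtqueues (which clears the *_run flags so no new work is queued), then flush every work item so none of them can touch vsock after kfree(). A sketch, assuming the synchronize_rcu() step elided from the match list:

    static void example_remove_tail(struct virtio_device *vdev)
    {
            struct virtio_vsock *vsock = vdev->priv;

            rcu_assign_pointer(the_virtio_vsock, NULL);
            synchronize_rcu();              /* wait out RCU readers */

            virtio_vsock_vqs_del(vsock);

            /* No new work can be queued now; flush what is left. */
            flush_work(&vsock->rx_work);
            flush_work(&vsock->tx_work);
            flush_work(&vsock->event_work);
            flush_work(&vsock->send_pkt_work);

            kfree(vsock);
    }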
722 struct virtio_vsock *vsock = vdev->priv;
729 virtio_vsock_vqs_del(vsock);
738 struct virtio_vsock *vsock = vdev->priv;
743 /* Only one virtio-vsock device per guest is supported */
750 ret = virtio_vsock_vqs_init(vsock);
754 rcu_assign_pointer(the_virtio_vsock, vsock);
755 virtio_vsock_vqs_start(vsock);
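The PM hooks reuse the same building blocks: freeze unpublishes the pointer and deletes the virtqueues, and restore re-runs the init/publish/start sequence from probe. A sketch of the restore side:

    static int example_restore(struct virtio_device *vdev)
    {
            struct virtio_vsock *vsock = vdev->priv;
            int ret;

            ret = virtio_vsock_vqs_init(vsock);
            if (ret < 0)
                    return ret;

            rcu_assign_pointer(the_virtio_vsock, vsock);
            virtio_vsock_vqs_start(vsock);
            return 0;
    }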
823 MODULE_DESCRIPTION("virtio transport for vsock");