Lines Matching defs:vsock

3  * vhost transport for vsock
72 struct vhost_vsock *vsock;
74 hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
75 u32 other_cid = vsock->guest_cid;
82 return vsock;
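
The matches at 72-82 come from the per-CID lookup used throughout the file. A minimal sketch of how those lines plausibly fit together (not verbatim source; callers are expected to hold the RCU read lock):

    static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
    {
        struct vhost_vsock *vsock;

        /* Walk the hash bucket keyed by guest CID. */
        hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
            u32 other_cid = vsock->guest_cid;

            /* Skip instances that have no CID assigned yet. */
            if (other_cid == 0)
                continue;

            if (other_cid == guest_cid)
                return vsock;
        }

        return NULL;
    }
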
90 vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
93 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
107 vhost_disable_notify(&vsock->dev, vq);
119 skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
122 vhost_enable_notify(&vsock->dev, vq);
129 virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
134 virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
138 if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
139 vhost_disable_notify(&vsock->dev, vq);
229 virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
234 val = atomic_dec_return(&vsock->queued_replies);
247 vhost_signal(&vsock->dev, vq);
259 struct vhost_vsock *vsock;
261 vsock = container_of(work, struct vhost_vsock, send_pkt_work);
262 vq = &vsock->vqs[VSOCK_VQ_RX];
264 vhost_transport_do_send_pkt(vsock, vq);
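
The matches from 90 through 264 belong to the host-to-guest data path: the send_pkt work item drains send_pkt_queue into the guest-visible RX virtqueue, toggling guest notifications so an empty ring does not spin. A condensed sketch of that pattern based on the matched lines (descriptor copying and error unwinding elided, helper names assumed from the same file):

    static void vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                                            struct vhost_virtqueue *vq)
    {
        unsigned int out, in;
        struct sk_buff *skb;
        bool added = false;
        int head;

        vhost_disable_notify(&vsock->dev, vq);

        for (;;) {
            skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
            if (!skb) {
                /* Queue drained: re-arm guest kicks and stop. */
                vhost_enable_notify(&vsock->dev, vq);
                break;
            }

            head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                     &out, &in, NULL, NULL);
            if (head < 0) {
                virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
                break;
            }
            if (head == vq->num) {
                /* No guest RX buffer: requeue the skb, then retry only if
                 * the guest kicked while notifications were re-enabled.
                 */
                virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
                if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
                    vhost_disable_notify(&vsock->dev, vq);
                    continue;
                }
                break;
            }

            /* ... copy virtio_vsock_hdr plus payload into the guest buffer,
             * vhost_add_used(), decrement queued_replies for replies ...
             */
            added = true;
        }

        if (added)
            vhost_signal(&vsock->dev, vq);
    }

    static void vhost_transport_send_pkt_work(struct vhost_work *work)
    {
        struct vhost_vsock *vsock = container_of(work, struct vhost_vsock,
                                                 send_pkt_work);

        vhost_transport_do_send_pkt(vsock, &vsock->vqs[VSOCK_VQ_RX]);
    }
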
271 struct vhost_vsock *vsock;
277 vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
278 if (!vsock) {
285 atomic_inc(&vsock->queued_replies);
287 virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
288 vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);
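
Lines 271-288 are the transport's send hook for host-to-guest packets: the destination CID is resolved under RCU, replies are counted for throttling, and the skb is queued before kicking the RX-queue worker. A sketch along the lines of the matches:

    static int vhost_transport_send_pkt(struct sk_buff *skb)
    {
        struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
        struct vhost_vsock *vsock;
        int len = skb->len;

        rcu_read_lock();

        /* Find the vhost_vsock instance for the guest context ID. */
        vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
        if (!vsock) {
            rcu_read_unlock();
            kfree_skb(skb);
            return -ENODEV;
        }

        /* Replies are counted so TX processing can be throttled. */
        if (virtio_vsock_skb_reply(skb))
            atomic_inc(&vsock->queued_replies);

        virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
        vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);

        rcu_read_unlock();
        return len;
    }
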
297 struct vhost_vsock *vsock;
304 vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
305 if (!vsock)
308 cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);
311 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
314 new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
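
Lines 297-314 are the cancel path used when a socket goes away: its queued skbs are purged and queued_replies is rewound; if that drops the count back below the TX ring size, the TX handler is re-kicked so throttled processing resumes. A sketch based on the matches:

    static int vhost_transport_cancel_pkt(struct vsock_sock *vsk)
    {
        struct vhost_vsock *vsock;
        int cnt;

        rcu_read_lock();

        /* Look up the instance serving this socket's remote CID. */
        vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
        if (!vsock) {
            rcu_read_unlock();
            return -ENODEV;
        }

        /* Drop every queued skb that belongs to this socket. */
        cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

        if (cnt) {
            struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
            int new_cnt;

            new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
            /* Crossed back under the ring size: resume TX processing. */
            if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
                vhost_poll_queue(&tx_vq->poll);
        }

        rcu_read_unlock();
        return 0;
    }
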
386 static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
388 struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
392 val = atomic_read(&vsock->queued_replies);
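
The check at 386-392 is the throttle used by the TX kick handler (see line 488): once queued_replies reaches the TX ring size, the handler stops processing until the backlog drains. Roughly:

    static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
    {
        struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
        int val;

        smp_rmb(); /* paired with the atomic updates of queued_replies */
        val = atomic_read(&vsock->queued_replies);

        /* Allow more TX processing only while replies still fit in the ring. */
        return val < vq->num;
    }
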
451 struct vhost_vsock *vsock;
455 vsock = vhost_vsock_get(remote_cid);
457 if (vsock)
458 seqpacket_allow = vsock->seqpacket_allow;
469 struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
484 vhost_disable_notify(&vsock->dev, vq);
488 if (!vhost_vsock_more_replies(vsock)) {
502 if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
503 vhost_disable_notify(&vsock->dev, vq);
523 if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
536 vhost_signal(&vsock->dev, vq);
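
Lines 469-536 are the TX kick handler for guest-to-host packets; the throttle above gates it (line 488), and the match at 523 is the address check that only lets correctly addressed packets reach the core transport. The step at 523, with helper names assumed from the same file:

    /* Inside the TX descriptor loop, after an skb and its header 'hdr'
     * have been built from the guest buffer.
     */
    if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
        le64_to_cpu(hdr->dst_cid) == vhost_transport_get_local_cid())
        virtio_transport_recv_pkt(&vhost_transport, skb);
    else
        kfree_skb(skb);
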
546 struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
549 vhost_transport_do_send_pkt(vsock, vq);
552 static int vhost_vsock_start(struct vhost_vsock *vsock)
558 mutex_lock(&vsock->dev.mutex);
560 ret = vhost_dev_check_owner(&vsock->dev);
564 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
565 vq = &vsock->vqs[i];
575 vhost_vq_set_backend(vq, vsock);
587 vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);
589 mutex_unlock(&vsock->dev.mutex);
596 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
597 vq = &vsock->vqs[i];
604 mutex_unlock(&vsock->dev.mutex);
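
Lines 552-604 implement VHOST_VSOCK_SET_RUNNING(1): under the device mutex each virtqueue gets this vhost_vsock as its backend, and the send worker is kicked once because packets may already have been queued before start. Condensed (the real code also validates vring access and unwinds backends on error):

    static int vhost_vsock_start(struct vhost_vsock *vsock)
    {
        struct vhost_virtqueue *vq;
        size_t i;
        int ret;

        mutex_lock(&vsock->dev.mutex);

        ret = vhost_dev_check_owner(&vsock->dev);
        if (ret < 0)
            goto out;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
            vq = &vsock->vqs[i];

            mutex_lock(&vq->mutex);
            if (!vhost_vq_get_backend(vq)) {
                /* Point the vq at this device and initialise access. */
                vhost_vq_set_backend(vq, vsock);
                ret = vhost_vq_init_access(vq);
            }
            mutex_unlock(&vq->mutex);
            if (ret)
                goto out; /* the real code unwinds the backends here */
        }

        /* Packets may have been queued before the device was started. */
        vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);

    out:
        mutex_unlock(&vsock->dev.mutex);
        return ret;
    }
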
608 static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
613 mutex_lock(&vsock->dev.mutex);
616 ret = vhost_dev_check_owner(&vsock->dev);
621 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
622 struct vhost_virtqueue *vq = &vsock->vqs[i];
630 mutex_unlock(&vsock->dev.mutex);
634 static void vhost_vsock_free(struct vhost_vsock *vsock)
636 kvfree(vsock);
642 struct vhost_vsock *vsock;
648 vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
649 if (!vsock)
652 vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
658 vsock->guest_cid = 0; /* no CID assigned yet */
660 atomic_set(&vsock->queued_replies, 0);
662 vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
663 vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
664 vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
665 vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
667 vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
671 file->private_data = vsock;
672 skb_queue_head_init(&vsock->send_pkt_queue);
673 vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
677 vhost_vsock_free(vsock);
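
Lines 642-677 are the open() path: the per-file instance is allocated, the TX/RX kick handlers are wired up, the vhost device is initialised, and the send queue and work item are set up. A sketch of how the matched lines fit together (error handling condensed):

    static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
    {
        struct vhost_virtqueue **vqs;
        struct vhost_vsock *vsock;

        vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!vsock)
            return -ENOMEM;

        vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
            vhost_vsock_free(vsock);
            return -ENOMEM;
        }

        vsock->guest_cid = 0; /* no CID assigned yet */
        atomic_set(&vsock->queued_replies, 0);

        vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
        vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
        vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
        vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

        vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
                       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT, VHOST_VSOCK_WEIGHT,
                       true, NULL);

        file->private_data = vsock;
        skb_queue_head_init(&vsock->send_pkt_queue);
        vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
        return 0;
    }
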
681 static void vhost_vsock_flush(struct vhost_vsock *vsock)
683 vhost_dev_flush(&vsock->dev);
714 struct vhost_vsock *vsock = file->private_data;
717 if (vsock->guest_cid)
718 hash_del_rcu(&vsock->hash);
721 /* Wait for other CPUs to finish using vsock */
730 * need to stop the vsock device in any case.
734 vhost_vsock_stop(vsock, false);
735 vhost_vsock_flush(vsock);
736 vhost_dev_stop(&vsock->dev);
738 virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
740 vhost_dev_cleanup(&vsock->dev);
741 kfree(vsock->dev.vqs);
742 vhost_vsock_free(vsock);
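
Lines 714-742 are the release path, and the ordering is the interesting part: the instance is unhashed and an RCU grace period passes before anything is torn down, so concurrent vhost_vsock_get() callers never see a dying device; only then are the queues stopped, pending work flushed, and leftover skbs purged. Roughly:

    static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
    {
        struct vhost_vsock *vsock = file->private_data;

        mutex_lock(&vhost_vsock_mutex);
        if (vsock->guest_cid)
            hash_del_rcu(&vsock->hash);
        mutex_unlock(&vhost_vsock_mutex);

        /* Wait for other CPUs to finish using vsock */
        synchronize_rcu();

        /* ... reset any sockets still connected to this guest CID ... */

        vhost_vsock_stop(vsock, false);
        vhost_vsock_flush(vsock);
        vhost_dev_stop(&vsock->dev);

        virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

        vhost_dev_cleanup(&vsock->dev);
        kfree(vsock->dev.vqs);
        vhost_vsock_free(vsock);
        return 0;
    }
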
746 static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
768 if (other && other != vsock) {
773 if (vsock->guest_cid)
774 hash_del_rcu(&vsock->hash);
776 vsock->guest_cid = guest_cid;
777 hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
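
Lines 746-777 serve VHOST_VSOCK_SET_GUEST_CID: the CID is validated, checked for collisions with other vhost_vsock instances under the global mutex, and the instance is re-hashed under its new key. A condensed sketch (the real code also rejects CIDs already used by a nested guest-to-host transport):

    static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
    {
        struct vhost_vsock *other;

        /* Refuse reserved CIDs and anything that does not fit in 32 bits. */
        if (guest_cid <= VMADDR_CID_HOST || guest_cid >= U32_MAX)
            return -EINVAL;

        /* Refuse if the CID is already taken by another instance. */
        mutex_lock(&vhost_vsock_mutex);
        other = vhost_vsock_get(guest_cid);
        if (other && other != vsock) {
            mutex_unlock(&vhost_vsock_mutex);
            return -EADDRINUSE;
        }

        /* Re-hash this instance under its new CID. */
        if (vsock->guest_cid)
            hash_del_rcu(&vsock->hash);

        vsock->guest_cid = guest_cid;
        hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
        mutex_unlock(&vhost_vsock_mutex);

        return 0;
    }
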
783 static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
791 mutex_lock(&vsock->dev.mutex);
793 !vhost_log_access_ok(&vsock->dev)) {
798 if (vhost_init_device_iotlb(&vsock->dev))
803 vsock->seqpacket_allow = true;
805 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
806 vq = &vsock->vqs[i];
811 mutex_unlock(&vsock->dev.mutex);
815 mutex_unlock(&vsock->dev.mutex);
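
Lines 783-815 handle VHOST_SET_FEATURES: unsupported bits are rejected, logging and IOTLB requirements are checked under the device mutex, VIRTIO_VSOCK_F_SEQPACKET flips seqpacket_allow, and the accepted bits are recorded per virtqueue. Condensed:

    static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
    {
        struct vhost_virtqueue *vq;
        int i;

        if (features & ~VHOST_VSOCK_FEATURES)
            return -EOPNOTSUPP;

        mutex_lock(&vsock->dev.mutex);
        if ((features & (1ULL << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&vsock->dev))
            goto err;

        if (features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)) {
            if (vhost_init_device_iotlb(&vsock->dev))
                goto err;
        }

        if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
            vsock->seqpacket_allow = true;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
            vq = &vsock->vqs[i];
            mutex_lock(&vq->mutex);
            vq->acked_features = features;
            mutex_unlock(&vq->mutex);
        }
        mutex_unlock(&vsock->dev.mutex);
        return 0;

    err:
        mutex_unlock(&vsock->dev.mutex);
        return -EFAULT;
    }
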
822 struct vhost_vsock *vsock = f->private_data;
833 return vhost_vsock_set_cid(vsock, guest_cid);
838 return vhost_vsock_start(vsock);
840 return vhost_vsock_stop(vsock, true);
849 return vhost_vsock_set_features(vsock, features);
860 vhost_set_backend_features(&vsock->dev, features);
863 mutex_lock(&vsock->dev.mutex);
864 r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
866 r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
868 vhost_vsock_flush(vsock);
869 mutex_unlock(&vsock->dev.mutex);
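
Lines 822-869 are the ioctl dispatcher that a VMM drives through /dev/vhost-vsock. A hypothetical minimal caller is sketched below; the device node and ioctl names come from the vhost UAPI, while the CID value is illustrative and a real VMM would also set up guest memory and the vrings before starting the device:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/vhost.h>

    int main(void)
    {
        __u64 guest_cid = 3;  /* example CID, must be > VMADDR_CID_HOST */
        int running = 1;
        int fd = open("/dev/vhost-vsock", O_RDWR);

        if (fd < 0) {
            perror("open /dev/vhost-vsock");
            return 1;
        }
        if (ioctl(fd, VHOST_SET_OWNER, NULL) < 0)      /* generic vhost_dev_ioctl() path */
            perror("VHOST_SET_OWNER");
        if (ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &guest_cid) < 0)  /* -> vhost_vsock_set_cid() */
            perror("VHOST_VSOCK_SET_GUEST_CID");
        if (ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running) < 0)      /* -> vhost_vsock_start() */
            perror("VHOST_VSOCK_SET_RUNNING");

        close(fd);
        return 0;
    }
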
877 struct vhost_vsock *vsock = file->private_data;
878 struct vhost_dev *dev = &vsock->dev;
888 struct vhost_vsock *vsock = file->private_data;
889 struct vhost_dev *dev = &vsock->dev;
896 struct vhost_vsock *vsock = file->private_data;
897 struct vhost_dev *dev = &vsock->dev;
916 .name = "vhost-vsock",
948 MODULE_DESCRIPTION("vhost transport for vsock ");
950 MODULE_ALIAS("devname:vhost-vsock");