Lines Matching defs:vrp

98  * @vrp: the virtio remote processor device this channel belongs to
106 struct virtproc_info *vrp;
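
The two entries above (98/106) are the kernel-doc and the member declaration tying each rpmsg channel to the virtproc that owns it. As a reading aid, here is a sketch of that relationship; the virtproc_info fields are reconstructed only from the vrp members the rest of this listing dereferences, so exact types, ordering, and any fields not touched below are assumptions.

struct virtproc_info {
	struct virtio_device *vdev;	/* the virtio device we ride on */
	struct virtqueue *rvq, *svq;	/* rx and tx virtqueues */
	void *rbufs, *sbufs;		/* rx and tx halves of the buffer area */
	unsigned int num_bufs;		/* total number of buffers (rx + tx) */
	unsigned int buf_size;		/* size of one buffer */
	int last_sbuf;			/* index of the next never-used tx buffer */
	dma_addr_t bufs_dma;		/* DMA handle of the buffer area */
	struct mutex tx_lock;		/* serializes access to the tx virtqueue */
	struct idr endpoints;		/* local address -> endpoint lookup */
	struct mutex endpoints_lock;	/* protects @endpoints */
	wait_queue_head_t sendq;	/* senders blocked on a free tx buffer */
	atomic_t sleepers;		/* number of senders currently blocked */
};

/*
 * Each channel is an rpmsg_device wrapped together with a back-pointer to
 * its virtproc (the "@vrp: the virtio remote processor device this channel
 * belongs to" kernel-doc line above).
 */
struct virtio_rpmsg_channel {
	struct rpmsg_device rpdev;
	struct virtproc_info *vrp;
};
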
152 static struct rpmsg_device *__rpmsg_create_channel(struct virtproc_info *vrp,
209 static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
216 struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;
239 mutex_lock(&vrp->endpoints_lock);
242 id = idr_alloc(&vrp->endpoints, ept, id_min, id_max, GFP_KERNEL);
249 mutex_unlock(&vrp->endpoints_lock);
254 mutex_unlock(&vrp->endpoints_lock);
263 struct virtproc_info *vrp = vch->vrp;
265 return __rpmsg_create_channel(vrp, chinfo);
272 struct virtproc_info *vrp = vch->vrp;
274 return rpmsg_unregister_device(&vrp->vdev->dev, chinfo);
284 return __rpmsg_create_ept(vch->vrp, rpdev, cb, priv, chinfo.src);
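
Lines 263-284 are the rpmsg_device_ops glue: each op recovers the owning virtproc from the channel wrapper and delegates. A small sketch of the pattern; the op names are taken from the listing context, and to_virtio_rpmsg_channel() is assumed to be the driver's container_of() helper.

/* The rpdev member name matches the struct sketch shown earlier. */
#define to_virtio_rpmsg_channel(_rpdev) \
	container_of(_rpdev, struct virtio_rpmsg_channel, rpdev)

static struct rpmsg_device *
virtio_rpmsg_create_channel(struct rpmsg_device *rpdev,
			    struct rpmsg_channel_info *chinfo)
{
	struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
	struct virtproc_info *vrp = vch->vrp;

	return __rpmsg_create_channel(vrp, chinfo);
}

static int virtio_rpmsg_release_channel(struct rpmsg_device *rpdev,
					struct rpmsg_channel_info *chinfo)
{
	struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
	struct virtproc_info *vrp = vch->vrp;

	/* channel devices hang off the virtio device, so unregister there */
	return rpmsg_unregister_device(&vrp->vdev->dev, chinfo);
}
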
289 * @vrp: virtproc which owns this ept
298 __rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
301 mutex_lock(&vrp->endpoints_lock);
302 idr_remove(&vrp->endpoints, ept->addr);
303 mutex_unlock(&vrp->endpoints_lock);
317 __rpmsg_destroy_ept(vch->vrp, ept);
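
Lines 298-303 are the mirror image of the allocation above: __rpmsg_destroy_ept() unpublishes the endpoint under the same lock. Trimmed sketch; the real function also tears down the endpoint's callback and drops its reference, which this listing does not show.

/* Trimmed: only the idr bookkeeping from lines 301-303 is shown here. */
static void
__rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
{
	mutex_lock(&vrp->endpoints_lock);
	idr_remove(&vrp->endpoints, ept->addr);	/* no more rx dispatch to this ept */
	mutex_unlock(&vrp->endpoints_lock);
}
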
323 struct virtproc_info *vrp = vch->vrp;
329 virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
347 struct virtproc_info *vrp = vch->vrp;
353 virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
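
Lines 323-353 gate channel announcements on the VIRTIO_RPMSG_F_NS feature: only when name-service support was negotiated does the driver tell the remote processor about local channel creation and removal. A hedged sketch of that pattern, folding both announce paths into one hypothetical announce() helper; the rpmsg_ns_msg fields and RPMSG_NS_* constants come from the generic rpmsg name-service definitions, and to_virtio_rpmsg_channel() is the assumed container_of() helper from the earlier sketch.

/* Sketch: announce a local channel to the remote side's name service. */
static int announce(struct rpmsg_device *rpdev, bool create)
{
	struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
	struct virtproc_info *vrp = vch->vrp;
	struct rpmsg_ns_msg nsm;

	/* nothing to do unless the channel wants it and F_NS was negotiated */
	if (!rpdev->announce ||
	    !virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS))
		return 0;

	strscpy(nsm.name, rpdev->id.name, sizeof(nsm.name));
	nsm.addr = cpu_to_rpmsg32(rpdev, rpdev->ept->addr);
	nsm.flags = cpu_to_rpmsg32(rpdev, create ? RPMSG_NS_CREATE
						 : RPMSG_NS_DESTROY);

	/* name-service messages go to the fixed RPMSG_NS_ADDR endpoint */
	return rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
}
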
390 static struct rpmsg_device *__rpmsg_create_channel(struct virtproc_info *vrp,
395 struct device *tmp, *dev = &vrp->vdev->dev;
412 /* Link the channel to our vrp */
413 vch->vrp = vrp;
420 rpdev->little_endian = virtio_is_little_endian(vrp->vdev);
430 rpdev->dev.parent = &vrp->vdev->dev;
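
Lines 390-430 create a channel: allocate the wrapper, link it to its vrp, record the negotiated endianness on the rpmsg_device, and parent it under the virtio device. Condensed sketch only; the duplicate-channel lookup hinted at by "tmp" on line 395, the ops wiring, and the error/release handling are elided.

static struct rpmsg_device *
__rpmsg_create_channel(struct virtproc_info *vrp,
		       struct rpmsg_channel_info *chinfo)
{
	struct virtio_rpmsg_channel *vch;
	struct rpmsg_device *rpdev;

	vch = kzalloc(sizeof(*vch), GFP_KERNEL);
	if (!vch)
		return NULL;

	/* Link the channel to our vrp */
	vch->vrp = vrp;

	rpdev = &vch->rpdev;
	rpdev->src = chinfo->src;
	rpdev->dst = chinfo->dst;
	rpdev->little_endian = virtio_is_little_endian(vrp->vdev);
	strscpy(rpdev->id.name, chinfo->name, sizeof(rpdev->id.name));

	/* the new channel device lives under the virtio device */
	rpdev->dev.parent = &vrp->vdev->dev;
	rpmsg_register_device(rpdev);	/* error unwinding elided in this sketch */

	return rpdev;
}
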
440 static void *get_a_tx_buf(struct virtproc_info *vrp)
446 mutex_lock(&vrp->tx_lock);
452 if (vrp->last_sbuf < vrp->num_bufs / 2)
453 ret = vrp->sbufs + vrp->buf_size * vrp->last_sbuf++;
456 ret = virtqueue_get_buf(vrp->svq, &len);
458 mutex_unlock(&vrp->tx_lock);
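
Lines 440-458 show the whole tx-buffer policy, reconstructed below: hand out each of the num_bufs/2 tx buffers exactly once from sbufs, and once last_sbuf reaches that mark, live off buffers the remote processor has already consumed, pulled back from the used ring with virtqueue_get_buf().

static void *get_a_tx_buf(struct virtproc_info *vrp)
{
	unsigned int len;
	void *ret;

	mutex_lock(&vrp->tx_lock);

	if (vrp->last_sbuf < vrp->num_bufs / 2)
		/* still have never-used buffers left */
		ret = vrp->sbufs + vrp->buf_size * vrp->last_sbuf++;
	else
		/* otherwise recycle a buffer the remote already consumed */
		ret = virtqueue_get_buf(vrp->svq, &len);

	mutex_unlock(&vrp->tx_lock);

	return ret;
}

Holding tx_lock for both branches keeps last_sbuf and the used-ring harvesting consistent with the send path, which queues buffers under the same lock (lines 630-647).
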
465 * @vrp: virtual remote processor state
479 static void rpmsg_upref_sleepers(struct virtproc_info *vrp)
482 mutex_lock(&vrp->tx_lock);
485 if (atomic_inc_return(&vrp->sleepers) == 1)
487 virtqueue_enable_cb(vrp->svq);
489 mutex_unlock(&vrp->tx_lock);
494 * @vrp: virtual remote processor state
506 static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
509 mutex_lock(&vrp->tx_lock);
512 if (atomic_dec_and_test(&vrp->sleepers))
514 virtqueue_disable_cb(vrp->svq);
516 mutex_unlock(&vrp->tx_lock);
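
Lines 465-516 pair up: tx-complete callbacks are enabled only while at least one sender is blocked waiting for a buffer, and disabled again when the last waiter leaves, so the common case runs without tx interrupts. Reconstructed from the listed lines:

static void rpmsg_upref_sleepers(struct virtproc_info *vrp)
{
	mutex_lock(&vrp->tx_lock);

	/* first sleeper: start listening for tx-used notifications */
	if (atomic_inc_return(&vrp->sleepers) == 1)
		virtqueue_enable_cb(vrp->svq);

	mutex_unlock(&vrp->tx_lock);
}

static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
{
	mutex_lock(&vrp->tx_lock);

	/* last sleeper gone: stop the interrupts again */
	if (atomic_dec_and_test(&vrp->sleepers))
		virtqueue_disable_cb(vrp->svq);

	mutex_unlock(&vrp->tx_lock);
}

The wake-up side is the tx virtqueue callback at lines 818-823, which wakes vrp->sendq.
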
558 struct virtproc_info *vrp = vch->vrp;
579 if (len > vrp->buf_size - sizeof(struct rpmsg_hdr)) {
585 msg = get_a_tx_buf(vrp);
592 rpmsg_upref_sleepers(vrp);
600 err = wait_event_interruptible_timeout(vrp->sendq,
601 (msg = get_a_tx_buf(vrp)),
605 rpmsg_downref_sleepers(vrp);
630 mutex_lock(&vrp->tx_lock);
633 err = virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL);
645 virtqueue_kick(vrp->svq);
647 mutex_unlock(&vrp->tx_lock);
706 return vch->vrp->buf_size - sizeof(struct rpmsg_hdr);
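
Lines 558-647, plus the MTU at 706, are the send path: payloads larger than one buffer minus the rpmsg header are rejected, which is exactly the limit line 706 reports; otherwise grab a tx buffer, optionally sleeping on vrp->sendq with a timeout, then queue the message on the tx virtqueue under tx_lock and kick the remote side. A condensed sketch under assumptions: send_sketch() is a hypothetical name, the rpmsg_hdr field layout and the 15-second timeout are assumed, and sg_init_one() stands in for the driver's rpmsg_sg_init() helper seen elsewhere in this listing.

static int send_sketch(struct virtio_rpmsg_channel *vch, u32 src, u32 dst,
		       void *data, int len, bool wait)
{
	struct virtproc_info *vrp = vch->vrp;
	struct scatterlist sg;
	struct rpmsg_hdr *msg;	/* header layout assumed: src/dst/len/data[] */
	int err;

	/* payload must fit in a single buffer, minus the rpmsg header */
	if (len > vrp->buf_size - sizeof(struct rpmsg_hdr))
		return -EMSGSIZE;

	msg = get_a_tx_buf(vrp);
	if (!msg && !wait)
		return -ENOMEM;

	/* no free buffer: become a "sleeper" and wait for a tx completion */
	while (!msg) {
		rpmsg_upref_sleepers(vrp);
		err = wait_event_interruptible_timeout(vrp->sendq,
						       (msg = get_a_tx_buf(vrp)),
						       msecs_to_jiffies(15000));
		rpmsg_downref_sleepers(vrp);
		if (err == 0)
			return -ERESTARTSYS;	/* timed out */
	}

	/* fill in the header and payload */
	msg->src = cpu_to_rpmsg32(&vch->rpdev, src);
	msg->dst = cpu_to_rpmsg32(&vch->rpdev, dst);
	msg->len = cpu_to_rpmsg16(&vch->rpdev, len);
	memcpy(msg->data, data, len);

	mutex_lock(&vrp->tx_lock);
	sg_init_one(&sg, msg, sizeof(*msg) + len);
	err = virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL);
	if (!err)
		virtqueue_kick(vrp->svq);	/* tell the remote side */
	mutex_unlock(&vrp->tx_lock);

	return err;
}
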
709 static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
714 bool little_endian = virtio_is_little_endian(vrp->vdev);
732 if (len > vrp->buf_size ||
739 mutex_lock(&vrp->endpoints_lock);
741 ept = idr_find(&vrp->endpoints, __rpmsg32_to_cpu(little_endian, msg->dst));
747 mutex_unlock(&vrp->endpoints_lock);
765 rpmsg_sg_init(&sg, msg, vrp->buf_size);
768 err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL);
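
Lines 709-768 are the rx dispatch: sanity-check the length, look up the destination endpoint in the idr under endpoints_lock, run its callback, and immediately re-post the buffer on the rx virtqueue. Sketch under assumptions: recv_single_sketch() is a hypothetical name, the header layout is assumed as in the send sketch, sg_init_one() again stands in for rpmsg_sg_init(), and the endpoint refcounting and callback locking done by the real function are elided.

static int recv_single_sketch(struct virtproc_info *vrp, struct device *dev,
			      struct rpmsg_hdr *msg, unsigned int len)
{
	bool little_endian = virtio_is_little_endian(vrp->vdev);
	unsigned int msg_len = __rpmsg16_to_cpu(little_endian, msg->len);
	struct rpmsg_endpoint *ept;
	struct scatterlist sg;

	/* reject anything that does not fit the buffer it arrived in */
	if (len > vrp->buf_size || msg_len > len - sizeof(*msg))
		return -EINVAL;

	/* find the local endpoint this message is addressed to */
	mutex_lock(&vrp->endpoints_lock);
	ept = idr_find(&vrp->endpoints,
		       __rpmsg32_to_cpu(little_endian, msg->dst));
	mutex_unlock(&vrp->endpoints_lock);

	if (ept && ept->cb)
		ept->cb(ept->rpdev, msg->data, msg_len, ept->priv,
			__rpmsg32_to_cpu(little_endian, msg->src));
	else
		dev_warn(dev, "msg received with no recipient\n");

	/* done with the buffer: hand it back to the rx virtqueue */
	sg_init_one(&sg, msg, vrp->buf_size);
	return virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL);
}
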
780 struct virtproc_info *vrp = rvq->vdev->priv;
793 err = rpmsg_recv_single(vrp, dev, msg, len);
806 virtqueue_kick(vrp->rvq);
818 struct virtproc_info *vrp = svq->vdev->priv;
823 wake_up_interruptible(&vrp->sendq);
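
Lines 780-823 are the two virtqueue callbacks, recovered below from the listed fragments: the rx callback drains every used buffer through rpmsg_recv_single() and kicks the rx queue once at the end (so the remote learns its buffers are available again), while the tx callback only wakes senders sleeping on vrp->sendq. The vrp pointer comes from vdev->priv, set up by probe at line 948.

static void rpmsg_xmit_done(struct virtqueue *svq)
{
	struct virtproc_info *vrp = svq->vdev->priv;

	/* a tx buffer was consumed: wake up any blocked sender */
	wake_up_interruptible(&vrp->sendq);
}

static void rpmsg_recv_done(struct virtqueue *rvq)
{
	struct virtproc_info *vrp = rvq->vdev->priv;
	struct device *dev = &rvq->vdev->dev;
	unsigned int len, msgs_received = 0;
	void *msg;

	/* process every buffer the remote side has filled */
	while ((msg = virtqueue_get_buf(rvq, &len)) != NULL) {
		if (rpmsg_recv_single(vrp, dev, msg, len))
			break;
		msgs_received++;
	}

	/* tell the remote processor the rx buffers were re-posted */
	if (msgs_received)
		virtqueue_kick(vrp->rvq);
}
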
833 struct virtproc_info *vrp = vdev->priv;
842 /* Link the channel to the vrp */
843 vch->vrp = vrp;
849 rpdev_ctrl->dev.parent = &vrp->vdev->dev;
851 rpdev_ctrl->little_endian = virtio_is_little_endian(vrp->vdev);
874 struct virtproc_info *vrp;
882 vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
883 if (!vrp)
886 vrp->vdev = vdev;
888 idr_init(&vrp->endpoints);
889 mutex_init(&vrp->endpoints_lock);
890 mutex_init(&vrp->tx_lock);
891 init_waitqueue_head(&vrp->sendq);
898 vrp->rvq = vqs[0];
899 vrp->svq = vqs[1];
902 WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
903 virtqueue_get_vring_size(vrp->svq));
906 if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
907 vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
909 vrp->num_bufs = MAX_RPMSG_NUM_BUFS;
911 vrp->buf_size = MAX_RPMSG_BUF_SIZE;
913 total_buf_space = vrp->num_bufs * vrp->buf_size;
917 total_buf_space, &vrp->bufs_dma,
925 bufs_va, &vrp->bufs_dma);
928 vrp->rbufs = bufs_va;
931 vrp->sbufs = bufs_va + total_buf_space / 2;
934 for (i = 0; i < vrp->num_bufs / 2; i++) {
936 void *cpu_addr = vrp->rbufs + i * vrp->buf_size;
938 rpmsg_sg_init(&sg, cpu_addr, vrp->buf_size);
940 err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
946 virtqueue_disable_cb(vrp->svq);
948 vdev->priv = vrp;
964 /* Link the channel to our vrp */
965 vch->vrp = vrp;
970 rpdev_ns->little_endian = virtio_is_little_endian(vrp->vdev);
972 rpdev_ns->dev.parent = &vrp->vdev->dev;
985 notify = virtqueue_kick_prepare(vrp->rvq);
996 virtqueue_notify(vrp->rvq);
1006 bufs_va, vrp->bufs_dma);
1008 vdev->config->del_vqs(vrp->vdev);
1010 kfree(vrp);
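
Lines 874-1010 are rpmsg_probe(). A heavily condensed sketch of the vrp setup follows: size the buffer pool from the rx vring (capped by the driver-local MAX_RPMSG_NUM_BUFS), allocate one coherent DMA area split into rx and tx halves, pre-post all rx buffers, and keep tx interrupts off until a sender actually sleeps. Assumptions: rpmsg_probe_sketch() is a hypothetical name, virtio_find_vqs() with this signature is how older kernels set up the queues (the exact API varies by version), and the ctrl device (the rpmsg_virtio_add_ctrl_dev() fragment at lines 833-851) and name-service channel (lines 964-972) are left out, as is most of the error unwinding at 1006-1010.

static int rpmsg_probe_sketch(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
	const char * const names[] = { "input", "output" };
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	size_t total_buf_space;
	void *bufs_va;
	bool notify;
	int err, i;

	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
	if (!vrp)
		return -ENOMEM;

	vrp->vdev = vdev;
	idr_init(&vrp->endpoints);
	mutex_init(&vrp->endpoints_lock);
	mutex_init(&vrp->tx_lock);
	init_waitqueue_head(&vrp->sendq);

	/* one rx and one tx virtqueue */
	err = virtio_find_vqs(vdev, 2, vqs, vq_cbs, names, NULL);
	if (err)
		goto free_vrp;
	vrp->rvq = vqs[0];
	vrp->svq = vqs[1];

	/* both rings are expected to be symmetric */
	WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
		virtqueue_get_vring_size(vrp->svq));

	/* use at most MAX_RPMSG_NUM_BUFS, but never more than the ring holds */
	if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
		vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
	else
		vrp->num_bufs = MAX_RPMSG_NUM_BUFS;
	vrp->buf_size = MAX_RPMSG_BUF_SIZE;

	total_buf_space = vrp->num_bufs * vrp->buf_size;
	bufs_va = dma_alloc_coherent(vdev->dev.parent, total_buf_space,
				     &vrp->bufs_dma, GFP_KERNEL);
	if (!bufs_va) {
		err = -ENOMEM;
		goto vqs_del;
	}

	vrp->rbufs = bufs_va;				/* first half: rx */
	vrp->sbufs = bufs_va + total_buf_space / 2;	/* second half: tx */

	/* give all rx buffers to the remote side up front */
	for (i = 0; i < vrp->num_bufs / 2; i++) {
		struct scatterlist sg;
		void *cpu_addr = vrp->rbufs + i * vrp->buf_size;

		sg_init_one(&sg, cpu_addr, vrp->buf_size);
		err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
					  GFP_KERNEL);
		WARN_ON(err);
	}

	/* no tx-complete interrupts until a sender actually waits */
	virtqueue_disable_cb(vrp->svq);

	vdev->priv = vrp;

	/* mark the device ready, then tell the remote it can start sending */
	notify = virtqueue_kick_prepare(vrp->rvq);
	virtio_device_ready(vdev);
	if (notify)
		virtqueue_notify(vrp->rvq);

	return 0;

vqs_del:
	vdev->config->del_vqs(vdev);
free_vrp:
	kfree(vrp);
	return err;
}
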
1023 struct virtproc_info *vrp = vdev->priv;
1024 size_t total_buf_space = vrp->num_bufs * vrp->buf_size;
1033 idr_destroy(&vrp->endpoints);
1035 vdev->config->del_vqs(vrp->vdev);
1038 vrp->rbufs, vrp->bufs_dma);
1040 kfree(vrp);
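
Lines 1023-1040 undo probe in reverse, sketched below; resetting the device and unregistering the child rpmsg devices, which the real rpmsg_remove() also has to do, are not visible in this listing and are elided. rpmsg_remove_sketch() is a hypothetical name.

static void rpmsg_remove_sketch(struct virtio_device *vdev)
{
	struct virtproc_info *vrp = vdev->priv;
	size_t total_buf_space = vrp->num_bufs * vrp->buf_size;

	/* no endpoints may remain once the channels are gone */
	idr_destroy(&vrp->endpoints);

	vdev->config->del_vqs(vrp->vdev);

	/* free the single coherent area holding both rx and tx buffers */
	dma_free_coherent(vdev->dev.parent, total_buf_space,
			  vrp->rbufs, vrp->bufs_dma);

	kfree(vrp);
}
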