Lines matching defs:vrp (identifier cross-reference for the `vrp` pointer, the per-link struct virtproc_info state in the Linux kernel's virtio rpmsg bus driver, drivers/rpmsg/virtio_rpmsg_bus.c; each hit below is prefixed with its line number in that file)
128 * @vrp: the virtio remote processor device this channel belongs to
136 struct virtproc_info *vrp;
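
The two hits above (source lines 128 and 136) come from the channel descriptor that ties every rpmsg channel back to its owning virtio processor. A minimal sketch of that structure, with the container_of() accessor the per-channel hits below rely on (the macro name is how the driver typically spells it; treat the exact spelling as illustrative):

        struct virtio_rpmsg_channel {
                struct rpmsg_device rpdev;    /* generic rpmsg channel device */
                struct virtproc_info *vrp;    /* owning virtio remote processor */
        };

        /* Recover the wrapper from the generic rpmsg_device handed to the ops. */
        #define to_virtio_rpmsg_channel(_rpdev) \
                container_of(_rpdev, struct virtio_rpmsg_channel, rpdev)
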
238 static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
245 struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;
268 mutex_lock(&vrp->endpoints_lock);
271 id = idr_alloc(&vrp->endpoints, ept, id_min, id_max, GFP_KERNEL);
278 mutex_unlock(&vrp->endpoints_lock);
283 mutex_unlock(&vrp->endpoints_lock);
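
Source lines 238-283 are the endpoint-allocation path: the new endpoint is published in the vrp->endpoints IDR under endpoints_lock, and idr_alloc() either pins a caller-requested address or picks a free dynamic one. A condensed sketch of that pattern, assuming the driver's RPMSG_ADDR_ANY / RPMSG_RESERVED_ADDRESSES conventions (kref setup and error prints trimmed):

        /* Pin the requested address, or let the IDR pick any free one. */
        if (addr == RPMSG_ADDR_ANY) {
                id_min = RPMSG_RESERVED_ADDRESSES;  /* skip the reserved range */
                id_max = 0;                         /* 0 means no upper bound */
        } else {
                id_min = addr;
                id_max = addr + 1;                  /* exactly this address */
        }

        mutex_lock(&vrp->endpoints_lock);
        id = idr_alloc(&vrp->endpoints, ept, id_min, id_max, GFP_KERNEL);
        if (id < 0) {
                mutex_unlock(&vrp->endpoints_lock);
                return NULL;    /* address taken, or dynamic range exhausted */
        }
        ept->addr = id;
        mutex_unlock(&vrp->endpoints_lock);
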
295 return __rpmsg_create_ept(vch->vrp, rpdev, cb, priv, chinfo.src);
300 * @vrp: virtproc which owns this ept
309 __rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
312 mutex_lock(&vrp->endpoints_lock);
313 idr_remove(&vrp->endpoints, ept->addr);
314 mutex_unlock(&vrp->endpoints_lock);
328 __rpmsg_destroy_ept(vch->vrp, ept);
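
Source lines 300-328 are the matching teardown, and the order matters: first unpublish the address from the IDR so routing stops, then disarm the callback before dropping the final reference. A sketch of that two-stage pattern; the cb_lock step and the __ept_release helper are not visible in the hits and follow the driver's usual shape, so treat them as assumed:

        static void
        __rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
        {
                /* 1) make sure new inbound messages can't find this ept */
                mutex_lock(&vrp->endpoints_lock);
                idr_remove(&vrp->endpoints, ept->addr);
                mutex_unlock(&vrp->endpoints_lock);

                /* 2) make sure in-flight messages won't invoke the callback */
                mutex_lock(&ept->cb_lock);
                ept->cb = NULL;
                mutex_unlock(&ept->cb_lock);

                kref_put(&ept->refcount, __ept_release);
        }
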
334 struct virtproc_info *vrp = vch->vrp;
340 virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
344 nsm.addr = cpu_to_virtio32(vrp->vdev, rpdev->ept->addr);
345 nsm.flags = cpu_to_virtio32(vrp->vdev, RPMSG_NS_CREATE);
358 struct virtproc_info *vrp = vch->vrp;
364 virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
368 nsm.addr = cpu_to_virtio32(vrp->vdev, rpdev->ept->addr);
369 nsm.flags = cpu_to_virtio32(vrp->vdev, RPMSG_NS_DESTROY);
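
Source lines 334-369 are the two announcement hooks. Both build the same rpmsg_ns_msg and differ only in the flag; the message goes to the remote's name-service endpoint at the reserved RPMSG_NS_ADDR, and only if the remote negotiated VIRTIO_RPMSG_F_NS. A sketch of the create side, reassembled from the hits (the destroy side swaps in RPMSG_NS_DESTROY; the announce/ept guard is the driver's usual condition and partly assumed):

        struct rpmsg_ns_msg nsm;

        if (rpdev->announce && rpdev->ept &&
            virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
                strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
                nsm.addr = cpu_to_virtio32(vrp->vdev, rpdev->ept->addr);
                nsm.flags = cpu_to_virtio32(vrp->vdev, RPMSG_NS_CREATE);

                /* tell the remote's name service about this channel */
                err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
        }
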
399 static struct rpmsg_device *rpmsg_create_channel(struct virtproc_info *vrp,
404 struct device *tmp, *dev = &vrp->vdev->dev;
421 /* Link the channel to our vrp */
422 vch->vrp = vrp;
438 rpdev->dev.parent = &vrp->vdev->dev;
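
Source lines 399-438 create a channel on behalf of the name service: allocate the wrapper, link it to its vrp, and register it as a child of the virtio device so driver-core matching can bind an rpmsg driver to it. A fragment sketch; the ops/announce setup and the duplicate-channel check are elided, and the chinfo field names are the rpmsg core's:

        vch = kzalloc(sizeof(*vch), GFP_KERNEL);
        if (!vch)
                return NULL;

        vch->vrp = vrp;                       /* link the channel to our vrp */

        rpdev = &vch->rpdev;
        rpdev->src = chinfo->src;
        rpdev->dst = chinfo->dst;
        rpdev->dev.parent = &vrp->vdev->dev;  /* child of the virtio device */

        ret = rpmsg_register_device(rpdev);
        if (ret)
                return NULL;
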
448 static void *get_a_tx_buf(struct virtproc_info *vrp)
454 mutex_lock(&vrp->tx_lock);
460 if (vrp->last_sbuf < vrp->num_bufs / 2)
461 ret = vrp->sbufs + vrp->buf_size * vrp->last_sbuf++;
464 ret = virtqueue_get_buf(vrp->svq, &len);
466 mutex_unlock(&vrp->tx_lock);
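
Source lines 448-466 are the whole buffer-grab strategy: until the static tx half of the pool is exhausted, hand out never-used buffers in order; after that, recycle buffers the remote has already consumed. The function reassembled from the hits (declarations filled in):

        static void *get_a_tx_buf(struct virtproc_info *vrp)
        {
                unsigned int len;
                void *ret;

                /* support multiple concurrent senders */
                mutex_lock(&vrp->tx_lock);

                /* either pick the next unused tx buffer ... */
                if (vrp->last_sbuf < vrp->num_bufs / 2)
                        ret = vrp->sbufs + vrp->buf_size * vrp->last_sbuf++;
                /* ... or recycle a used one the remote is done with */
                else
                        ret = virtqueue_get_buf(vrp->svq, &len);

                mutex_unlock(&vrp->tx_lock);

                return ret;
        }
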
473 * @vrp: virtual remote processor state
487 static void rpmsg_upref_sleepers(struct virtproc_info *vrp)
490 mutex_lock(&vrp->tx_lock);
493 if (atomic_inc_return(&vrp->sleepers) == 1)
495 virtqueue_enable_cb(vrp->svq);
497 mutex_unlock(&vrp->tx_lock);
502 * @vrp: virtual remote processor state
514 static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
517 mutex_lock(&vrp->tx_lock);
520 if (atomic_dec_and_test(&vrp->sleepers))
522 virtqueue_disable_cb(vrp->svq);
524 mutex_unlock(&vrp->tx_lock);
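
Source lines 473-524 implement interrupt suppression for the tx path: tx-done interrupts stay disabled while nobody is blocked waiting for a buffer; the first sleeper enables the callback, and the last one to leave disables it again. Both helpers reassembled from the hits:

        /* first would-be sleeper arms the "tx buffer consumed" interrupt */
        static void rpmsg_upref_sleepers(struct virtproc_info *vrp)
        {
                mutex_lock(&vrp->tx_lock);
                if (atomic_inc_return(&vrp->sleepers) == 1)
                        virtqueue_enable_cb(vrp->svq);
                mutex_unlock(&vrp->tx_lock);
        }

        /* last leaving sleeper disarms it again to avoid interrupt load */
        static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
        {
                mutex_lock(&vrp->tx_lock);
                if (atomic_dec_and_test(&vrp->sleepers))
                        virtqueue_disable_cb(vrp->svq);
                mutex_unlock(&vrp->tx_lock);
        }
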
566 struct virtproc_info *vrp = vch->vrp;
587 if (len > vrp->buf_size - sizeof(struct rpmsg_hdr)) {
593 msg = get_a_tx_buf(vrp);
600 rpmsg_upref_sleepers(vrp);
608 err = wait_event_interruptible_timeout(vrp->sendq,
609 (msg = get_a_tx_buf(vrp)),
613 rpmsg_downref_sleepers(vrp);
622 msg->len = cpu_to_virtio16(vrp->vdev, len);
624 msg->src = cpu_to_virtio32(vrp->vdev, src);
625 msg->dst = cpu_to_virtio32(vrp->vdev, dst);
638 mutex_lock(&vrp->tx_lock);
641 err = virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL);
653 virtqueue_kick(vrp->svq);
655 mutex_unlock(&vrp->tx_lock);
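
Source lines 566-655 are the send hot path. The flow stitched together from the hits: cap payloads at buf_size minus the header, try a non-blocking buffer grab, optionally sleep with the sleeper accounting above (the driver waits up to 15 seconds), then fill the header in virtio endianness and kick the tx virtqueue. A fragment sketch with trace and error prints dropped:

        /* fixed-size buffers: the payload must fit after the header */
        if (len > vrp->buf_size - sizeof(struct rpmsg_hdr))
                return -EMSGSIZE;

        msg = get_a_tx_buf(vrp);
        if (!msg && !wait)
                return -ENOMEM;

        while (!msg) {
                rpmsg_upref_sleepers(vrp);      /* arm tx-done interrupts */

                err = wait_event_interruptible_timeout(vrp->sendq,
                                                       (msg = get_a_tx_buf(vrp)),
                                                       msecs_to_jiffies(15000));

                rpmsg_downref_sleepers(vrp);    /* disarm them again */

                if (err == 0)                   /* timed out, still no buffer */
                        return -ERESTARTSYS;
        }

        msg->len = cpu_to_virtio16(vrp->vdev, len);
        msg->src = cpu_to_virtio32(vrp->vdev, src);
        msg->dst = cpu_to_virtio32(vrp->vdev, dst);
        memcpy(msg->data, data, len);

        rpmsg_sg_init(&sg, msg, sizeof(*msg) + len);

        mutex_lock(&vrp->tx_lock);
        err = virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL);
        if (!err)
                virtqueue_kick(vrp->svq);       /* tell the remote it has work */
        mutex_unlock(&vrp->tx_lock);
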
709 static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
714 unsigned int msg_len = virtio16_to_cpu(vrp->vdev, msg->len);
718 virtio32_to_cpu(vrp->vdev, msg->src),
719 virtio32_to_cpu(vrp->vdev, msg->dst), msg_len,
720 virtio16_to_cpu(vrp->vdev, msg->flags),
721 virtio32_to_cpu(vrp->vdev, msg->reserved));
731 if (len > vrp->buf_size ||
738 mutex_lock(&vrp->endpoints_lock);
740 ept = idr_find(&vrp->endpoints, virtio32_to_cpu(vrp->vdev, msg->dst));
746 mutex_unlock(&vrp->endpoints_lock);
754 virtio32_to_cpu(vrp->vdev, msg->src));
764 rpmsg_sg_init(&sg, msg, vrp->buf_size);
767 err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL);
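
Source lines 709-767 are the receive path for one message: sanity-check the reported length against the buffer, look up the destination endpoint under endpoints_lock, invoke its callback, then immediately return the buffer to the rx virtqueue. A fragment sketch of the dispatch; the cb_lock/refcount handling around the callback is not in the hits and follows the driver's usual pattern:

        /* route the message by its destination address */
        mutex_lock(&vrp->endpoints_lock);
        ept = idr_find(&vrp->endpoints, virtio32_to_cpu(vrp->vdev, msg->dst));
        if (ept)
                kref_get(&ept->refcount);   /* keep ept alive across the cb */
        mutex_unlock(&vrp->endpoints_lock);

        if (ept) {
                /* make sure ept->cb doesn't go away while we use it */
                mutex_lock(&ept->cb_lock);
                if (ept->cb)
                        ept->cb(ept->rpdev, msg->data, msg_len, ept->priv,
                                virtio32_to_cpu(vrp->vdev, msg->src));
                mutex_unlock(&ept->cb_lock);

                kref_put(&ept->refcount, __ept_release);
        }

        /* publish the used buffer back to the rx virtqueue */
        rpmsg_sg_init(&sg, msg, vrp->buf_size);
        err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL);
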
779 struct virtproc_info *vrp = rvq->vdev->priv;
792 err = rpmsg_recv_single(vrp, dev, msg, len);
805 virtqueue_kick(vrp->rvq);
817 struct virtproc_info *vrp = svq->vdev->priv;
822 wake_up_interruptible(&vrp->sendq);
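
Source lines 779-822 are the two virtqueue callbacks. The rx side drains every available buffer through rpmsg_recv_single() and kicks the ring once at the end; the tx side only wakes senders parked in the send path above. Sketch, condensed from the hits:

        static void rpmsg_recv_done(struct virtqueue *rvq)
        {
                struct virtproc_info *vrp = rvq->vdev->priv;
                struct device *dev = &rvq->vdev->dev;
                struct rpmsg_hdr *msg;
                unsigned int len, msgs_received = 0;

                /* drain everything the remote has queued for us */
                while ((msg = virtqueue_get_buf(rvq, &len))) {
                        if (rpmsg_recv_single(vrp, dev, msg, len))
                                break;
                        msgs_received++;
                }

                /* tell the remote we re-posted its rx buffers */
                if (msgs_received)
                        virtqueue_kick(vrp->rvq);
        }

        static void rpmsg_xmit_done(struct virtqueue *svq)
        {
                struct virtproc_info *vrp = svq->vdev->priv;

                /* a tx buffer was consumed; wake anyone waiting for one */
                wake_up_interruptible(&vrp->sendq);
        }
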
832 struct virtproc_info *vrp = priv;
833 struct device *dev = &vrp->vdev->dev;
862 chinfo.dst = virtio32_to_cpu(vrp->vdev, msg->addr);
865 virtio32_to_cpu(vrp->vdev, msg->flags) & RPMSG_NS_DESTROY ?
868 if (virtio32_to_cpu(vrp->vdev, msg->flags) & RPMSG_NS_DESTROY) {
869 ret = rpmsg_unregister_device(&vrp->vdev->dev, &chinfo);
873 newch = rpmsg_create_channel(vrp, &chinfo);
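
Source lines 832-873 are the name-service callback bound to RPMSG_NS_ADDR at probe time: each rpmsg_ns_msg from the remote either creates or destroys a channel, depending on the RPMSG_NS_DESTROY flag. The dispatch, condensed from the hits (chinfo.src = RPMSG_ADDR_ANY is the usual setup and partly assumed):

        chinfo.src = RPMSG_ADDR_ANY;
        chinfo.dst = virtio32_to_cpu(vrp->vdev, msg->addr);

        if (virtio32_to_cpu(vrp->vdev, msg->flags) & RPMSG_NS_DESTROY) {
                ret = rpmsg_unregister_device(&vrp->vdev->dev, &chinfo);
                if (ret)
                        dev_err(dev, "rpmsg_destroy_channel failed: %d\n", ret);
        } else {
                newch = rpmsg_create_channel(vrp, &chinfo);
                if (!newch)
                        dev_err(dev, "rpmsg_create_channel failed\n");
        }
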
886 struct virtproc_info *vrp;
892 vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
893 if (!vrp)
896 vrp->vdev = vdev;
898 idr_init(&vrp->endpoints);
899 mutex_init(&vrp->endpoints_lock);
900 mutex_init(&vrp->tx_lock);
901 init_waitqueue_head(&vrp->sendq);
908 vrp->rvq = vqs[0];
909 vrp->svq = vqs[1];
912 WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
913 virtqueue_get_vring_size(vrp->svq));
916 if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
917 vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
919 vrp->num_bufs = MAX_RPMSG_NUM_BUFS;
921 vrp->buf_size = MAX_RPMSG_BUF_SIZE;
923 total_buf_space = vrp->num_bufs * vrp->buf_size;
927 total_buf_space, &vrp->bufs_dma,
935 bufs_va, &vrp->bufs_dma);
938 vrp->rbufs = bufs_va;
941 vrp->sbufs = bufs_va + total_buf_space / 2;
944 for (i = 0; i < vrp->num_bufs / 2; i++) {
946 void *cpu_addr = vrp->rbufs + i * vrp->buf_size;
948 rpmsg_sg_init(&sg, cpu_addr, vrp->buf_size);
950 err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
956 virtqueue_disable_cb(vrp->svq);
958 vdev->priv = vrp;
963 vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
964 vrp, RPMSG_NS_ADDR);
965 if (!vrp->ns_ept) {
976 notify = virtqueue_kick_prepare(vrp->rvq);
987 virtqueue_notify(vrp->rvq);
995 bufs_va, vrp->bufs_dma);
997 vdev->config->del_vqs(vrp->vdev);
999 kfree(vrp);
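
Source lines 886-999 are probe. The sequence reconstructed from the hits: allocate the vrp state, take the rx/tx virtqueue pair, size the buffer pool off the vring (capped at MAX_RPMSG_NUM_BUFS), carve one DMA-coherent region into an rx half and a tx half, pre-post every rx buffer, and only then create the name-service endpoint and kick the remote. The buffer split and rx pre-posting, condensed:

        /* first half of the coherent region is rx, second half is tx */
        vrp->rbufs = bufs_va;
        vrp->sbufs = bufs_va + total_buf_space / 2;

        /* pre-post all rx buffers so the remote can transmit immediately */
        for (i = 0; i < vrp->num_bufs / 2; i++) {
                struct scatterlist sg;
                void *cpu_addr = vrp->rbufs + i * vrp->buf_size;

                rpmsg_sg_init(&sg, cpu_addr, vrp->buf_size);
                err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
                                          GFP_KERNEL);
                WARN_ON(err);   /* the vring was sized to hold all of them */
        }

        /* suppress tx-complete interrupts while no sender is waiting */
        virtqueue_disable_cb(vrp->svq);
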
1012 struct virtproc_info *vrp = vdev->priv;
1013 size_t total_buf_space = vrp->num_bufs * vrp->buf_size;
1022 if (vrp->ns_ept)
1023 __rpmsg_destroy_ept(vrp, vrp->ns_ept);
1025 idr_destroy(&vrp->endpoints);
1027 vdev->config->del_vqs(vrp->vdev);
1030 vrp->rbufs, vrp->bufs_dma);
1032 kfree(vrp);
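
Source lines 1012-1032 are remove, which unwinds probe in reverse: unregister the child channels, destroy the name-service endpoint, destroy the IDR, delete the virtqueues, and free the coherent buffer region last, since the remote must be quiesced before its buffers disappear. Sketch with the device reset and child-device walk elided; the exact parent device passed to dma_free_coherent() varies by kernel version, so it is shown here as an assumption:

        size_t total_buf_space = vrp->num_bufs * vrp->buf_size;

        if (vrp->ns_ept)
                __rpmsg_destroy_ept(vrp, vrp->ns_ept);

        idr_destroy(&vrp->endpoints);

        vdev->config->del_vqs(vrp->vdev);

        /* same device the buffers were allocated from in probe */
        dma_free_coherent(vdev->dev.parent, total_buf_space,
                          vrp->rbufs, vrp->bufs_dma);

        kfree(vrp);
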