Lines matching refs: ubufs

93  *  1: no outstanding ubufs
94  * >1: outstanding ubufs
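
The two fragments above (lines 93-94) come from the refcount documentation inside struct vhost_net_ubuf_ref. Reconstructed around the matched lines (field layout as in mainline drivers/vhost/net.c; treat this as a sketch, exact comments may differ between kernel versions), the object looks roughly like:

	struct vhost_net_ubuf_ref {
		/* refcount follows semantics similar to kref:
		 *  0: object is released
		 *  1: no outstanding ubufs
		 * >1: outstanding ubufs
		 */
		atomic_t refcount;
		wait_queue_head_t wait;
		struct vhost_virtqueue *vq;
	};
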
123 /* Reference counting for outstanding ubufs.
125 struct vhost_net_ubuf_ref *ubufs;
237 struct vhost_net_ubuf_ref *ubufs;
241 ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
242 if (!ubufs)
244 atomic_set(&ubufs->refcount, 1);
245 init_waitqueue_head(&ubufs->wait);
246 ubufs->vq = vq;
247 return ubufs;
250 static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
252 int r = atomic_sub_return(1, &ubufs->refcount);
254 wake_up(&ubufs->wait);
258 static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
260 vhost_net_ubuf_put(ubufs);
261 wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
264 static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
266 vhost_net_ubuf_put_and_wait(ubufs);
267 kfree(ubufs);
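
Lines 237-267 belong to the four lifecycle helpers. Filling in the context elided by the listing (a sketch; annotations such as unlikely() may vary by kernel version), they fit together like this:

	static struct vhost_net_ubuf_ref *
	vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
	{
		struct vhost_net_ubuf_ref *ubufs;

		/* No zerocopy for this backend: nothing to reference-count. */
		if (!zcopy)
			return NULL;
		ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
		if (!ubufs)
			return ERR_PTR(-ENOMEM);
		atomic_set(&ubufs->refcount, 1);	/* 1 == no outstanding ubufs */
		init_waitqueue_head(&ubufs->wait);
		ubufs->vq = vq;
		return ubufs;
	}

	static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
	{
		int r = atomic_sub_return(1, &ubufs->refcount);

		if (unlikely(!r))
			wake_up(&ubufs->wait);
		return r;
	}

	static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
	{
		vhost_net_ubuf_put(ubufs);	/* drop the initial reference */
		wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
	}

	static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
	{
		vhost_net_ubuf_put_and_wait(ubufs);
		kfree(ubufs);
	}

Note that put_and_wait leaves refcount at 0; that is why vhost_net_flush() (line 1397 below) has to re-arm it with atomic_set(..., 1) when the object is kept alive across a flush.
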
312 n->vqs[i].ubufs = NULL;
389 struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
390 struct vhost_virtqueue *vq = ubufs->vq;
398 cnt = vhost_net_ubuf_put(ubufs);
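
Lines 389-398 are from the zerocopy completion callback. Its exact signature and the ubuf_info plumbing have been reworked more than once, so the following is only a sketch of the shape it takes in recent mainline kernels:

	static void vhost_zerocopy_callback(struct sk_buff *skb,
					    struct ubuf_info *ubuf_base, bool success)
	{
		struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
		struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
		struct vhost_virtqueue *vq = ubufs->vq;
		int cnt;

		rcu_read_lock_bh();

		/* Mark this descriptor's DMA as done (or failed). */
		vq->heads[ubuf->desc].len = success ?
			VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;

		cnt = vhost_net_ubuf_put(ubufs);

		/* Kick the handler when traffic quiesces (cnt drops to 1) or
		 * periodically, so completed heads get signalled to the guest. */
		if (cnt <= 1 || !(cnt % 16))
			vhost_poll_queue(&vq->poll);

		rcu_read_unlock_bh();
	}
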
875 struct vhost_net_ubuf_ref *ubufs;
912 ubuf->ctx = nvq->ubufs;
921 ubufs = nvq->ubufs;
922 atomic_inc(&ubufs->refcount);
926 ubufs = NULL;
942 vhost_net_ubuf_put(ubufs);
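
Lines 875-942 come from the zerocopy TX handler. Condensed around the matched lines (a sketch; the real loop also manages upend_idx/done_idx bookkeeping and the msg_control plumbing), the reference-pinning pattern is roughly:

	/* Inside the per-descriptor loop of handle_tx_zerocopy(): */
	if (zcopy_used) {
		vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
		ubuf->ctx = nvq->ubufs;		/* completion callback finds ubufs here */
		ubufs = nvq->ubufs;
		atomic_inc(&ubufs->refcount);	/* one reference per in-flight buffer */
	} else {
		ubufs = NULL;			/* ordinary copy path: nothing to track */
	}

	err = sock->ops->sendmsg(sock, &msg, len);
	if (unlikely(err < 0) && zcopy_used) {
		/* sendmsg failed: drop the reference we pinned, unless the
		 * completion callback has already run for this descriptor. */
		if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
			vhost_net_ubuf_put(ubufs);
	}
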
1335 n->vqs[i].ubufs = NULL;
1389 if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
1394 vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
1397 atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
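
Lines 1389-1397 are the TX-flush sequence in vhost_net_flush(). A sketch of that block, assuming the mainline structure around tx_flush and the vq mutex:

	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower-device DMAs to complete. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		/* put_and_wait left refcount at 0; re-arm it for further use. */
		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
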
1506 struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
1539 ubufs = vhost_net_ubuf_alloc(vq,
1541 if (IS_ERR(ubufs)) {
1542 r = PTR_ERR(ubufs);
1562 oldubufs = nvq->ubufs;
1563 nvq->ubufs = ubufs;
1590 if (ubufs)
1591 vhost_net_ubuf_put_wait_and_free(ubufs);
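
Lines 1506-1591 are from vhost_net_set_backend(): a fresh ubufs object is allocated for the new backend, swapped in under the vq mutex, and the old one is released only after all of its outstanding buffers complete. A condensed sketch of both paths (error handling abbreviated; the vhost_sock_zcopy() argument follows mainline and is not shown in the matched lines):

	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;

	ubufs = vhost_net_ubuf_alloc(vq, sock && vhost_sock_zcopy(sock));
	if (IS_ERR(ubufs)) {
		r = PTR_ERR(ubufs);
		goto err_ubufs;
	}

	/* ... stop the vq and install the new socket ... */

	oldubufs = nvq->ubufs;		/* remember the old tracker */
	nvq->ubufs = ubufs;		/* new submissions reference the new one */

	/* Success path, after the vq mutex is dropped: wait out the old
	 * tracker's in-flight buffers, then free it. */
	if (oldubufs)
		vhost_net_ubuf_put_wait_and_free(oldubufs);

	/* Error path (err_used): the new tracker was never published, so
	 * drop its initial reference and free it the same way. */
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
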