Lines Matching refs:vrh (drivers/vhost/vringh.c)
38 static inline int __vringh_get_head(const struct vringh *vrh,
39 int (*getu16)(const struct vringh *vrh,
46 err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
49 &vrh->vring.avail->idx);
54 return vrh->vring.num;
57 virtio_rmb(vrh->weak_barriers);
59 i = *last_avail_idx & (vrh->vring.num - 1);
61 err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
64 *last_avail_idx, &vrh->vring.avail->ring[i]);
68 if (head >= vrh->vring.num) {
70 head, vrh->vring.num);
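
The lines above are the host-side avail-ring read path: compare the guest-written avail->idx against last_avail_idx, issue a read barrier, then fetch and validate the head index. A minimal userspace sketch of the same logic, assuming a split ring with a power-of-two size (all names here are illustrative, not the kernel's):

    #include <stdint.h>

    struct avail_ring {
        uint16_t flags;
        uint16_t idx;        /* written by the driver side */
        uint16_t ring[];     /* head indices, `num` entries */
    };

    /* Returns a descriptor head, or `num` when the ring is empty or the
     * head is bogus (the kernel returns -EINVAL for the latter case). */
    static uint16_t get_head(const struct avail_ring *avail,
                             uint16_t *last_avail, uint16_t num)
    {
        /* acquire load stands in for the plain read + virtio_rmb() pair */
        uint16_t avail_idx = __atomic_load_n(&avail->idx, __ATOMIC_ACQUIRE);

        if (*last_avail == avail_idx)
            return num;                        /* nothing new */

        uint16_t head = avail->ring[*last_avail & (num - 1)];
        if (head >= num)
            return num;                        /* out-of-range head */

        (*last_avail)++;
        return head;
    }
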
107 static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
110 int (*xfer)(const struct vringh *vrh,
120 err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
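
vringh_iov_xfer() walks the iov array, moving min(len, iov_len) bytes per element through the supplied xfer callback and advancing each entry's base and length as it is consumed. A simplified pull-direction sketch with memcpy standing in for the callback (illustrative, not the kernel helper):

    #include <string.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    static ssize_t iov_pull(struct iovec *iov, size_t count, size_t *i,
                            void *dst, size_t len)
    {
        ssize_t done = 0;

        while (len && *i < count) {
            size_t part = len < iov[*i].iov_len ? len : iov[*i].iov_len;

            memcpy(dst, iov[*i].iov_base, part);     /* the xfer() step */
            iov[*i].iov_base = (char *)iov[*i].iov_base + part;
            iov[*i].iov_len -= part;
            if (!iov[*i].iov_len)
                (*i)++;                              /* element consumed */
            dst = (char *)dst + part;
            len -= part;
            done += part;
        }
        return done;    /* may be short if the iov array runs out first */
    }
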
143 static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
149 if (!getrange(vrh, addr, range))
177 static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
186 static int move_to_indirect(const struct vringh *vrh,
199 len = vringh32_to_cpu(vrh, desc->len);
206 if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
207 *up_next = vringh16_to_cpu(vrh, desc->next);
245 static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
251 *descs = vrh->vring.desc;
252 *desc_max = vrh->vring.num;
256 static int slow_copy(struct vringh *vrh, void *dst, const void *src,
257 bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
259 bool (*getrange)(struct vringh *vrh,
262 bool (*getrange)(struct vringh *vrh,
266 int (*copy)(const struct vringh *vrh,
278 if (!rcheck(vrh, addr, &part, range, getrange))
281 err = copy(vrh, dst, src, part);
293 __vringh_iov(struct vringh *vrh, u16 i,
296 bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
302 int (*copy)(const struct vringh *vrh,
311 descs = vrh->vring.desc;
312 desc_max = vrh->vring.num;
330 err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
333 err = copy(vrh, &desc, &descs[i], sizeof(desc));
338 cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
339 u64 a = vringh64_to_cpu(vrh, desc.addr);
342 len = vringh32_to_cpu(vrh, desc.len);
343 if (!rcheck(vrh, a, &len, &range, getrange)) {
348 if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
355 err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
367 if (count > vrh->vring.num || indirect_count > desc_max) {
373 if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
394 len = vringh32_to_cpu(vrh, desc.len);
395 if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
400 addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
413 if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
414 desc.len = cpu_to_vringh32(vrh,
415 vringh32_to_cpu(vrh, desc.len) - len);
416 desc.addr = cpu_to_vringh64(vrh,
417 vringh64_to_cpu(vrh, desc.addr) + len);
421 if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
422 i = vringh16_to_cpu(vrh, desc.next);
426 i = return_from_indirect(vrh, &up_next,
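
__vringh_iov() above is the core of descriptor parsing: it walks the chain from `head`, detours into indirect tables via move_to_indirect()/return_from_indirect(), range-checks every address, appends readable segments to riov and writable ones to wiov, and bounds the iteration count to catch malicious loops. A stripped-down sketch of the direct-chain walk (no indirect tables, no range splitting; names are illustrative):

    #include <stdint.h>

    #define DESC_F_NEXT   1     /* same values as VRING_DESC_F_NEXT/WRITE */
    #define DESC_F_WRITE  2

    struct desc { uint64_t addr; uint32_t len; uint16_t flags, next; };

    static int walk_chain(const struct desc *descs, uint16_t num, uint16_t i,
                          void (*add_read)(uint64_t addr, uint32_t len),
                          void (*add_write)(uint64_t addr, uint32_t len))
    {
        uint16_t count = 0;

        for (;;) {
            if (i >= num)
                return -1;                   /* bad descriptor index */
            if (++count > num)
                return -1;                   /* loop detected (-ELOOP) */

            if (descs[i].flags & DESC_F_WRITE)
                add_write(descs[i].addr, descs[i].len);
            else
                add_read(descs[i].addr, descs[i].len);

            if (!(descs[i].flags & DESC_F_NEXT))
                return 0;                    /* end of chain */
            i = descs[i].next;
        }
    }
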
447 static inline int __vringh_complete(struct vringh *vrh,
450 int (*putu16)(const struct vringh *vrh,
452 int (*putused)(const struct vringh *vrh,
461 used_ring = vrh->vring.used;
462 used_idx = vrh->last_used_idx + vrh->completed;
464 off = used_idx % vrh->vring.num;
467 if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
468 u16 part = vrh->vring.num - off;
469 err = putused(vrh, &used_ring->ring[off], used, part);
471 err = putused(vrh, &used_ring->ring[0], used + part,
474 err = putused(vrh, &used_ring->ring[off], used, num_used);
483 virtio_wmb(vrh->weak_barriers);
485 err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
488 &vrh->vring.used->idx);
492 vrh->completed += num_used;
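
__vringh_complete() writes the used elements at last_used_idx + completed modulo the ring size, splitting the write in two when the batch wraps past the end, then publishes the new used->idx behind a write barrier. The same shape in a userspace sketch (illustrative; a release store stands in for virtio_wmb() plus putu16):

    #include <stdint.h>
    #include <string.h>

    struct used_elem { uint32_t id, len; };
    struct used_ring { uint16_t flags, idx; struct used_elem ring[]; };

    static void publish_used(struct used_ring *used, uint16_t num,
                             uint16_t used_idx,  /* last_used + completed */
                             const struct used_elem *elems, uint16_t n)
    {
        uint16_t off = used_idx % num;

        if (n > 1 && off + n >= num) {           /* batch wraps the ring */
            uint16_t part = num - off;

            memcpy(&used->ring[off], elems, part * sizeof(*elems));
            memcpy(&used->ring[0], elems + part,
                   (n - part) * sizeof(*elems));
        } else {
            memcpy(&used->ring[off], elems, n * sizeof(*elems));
        }
        /* release store orders the ring writes before the index update */
        __atomic_store_n(&used->idx, (uint16_t)(used_idx + n),
                         __ATOMIC_RELEASE);
    }
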
497 static inline int __vringh_need_notify(struct vringh *vrh,
498 int (*getu16)(const struct vringh *vrh,
509 virtio_mb(vrh->weak_barriers);
512 if (!vrh->event_indices) {
514 err = getu16(vrh, &flags, &vrh->vring.avail->flags);
517 &vrh->vring.avail->flags);
524 err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
527 &vring_used_event(&vrh->vring));
532 if (unlikely(vrh->completed > 0xffff))
536 vrh->last_used_idx + vrh->completed,
537 vrh->last_used_idx);
539 vrh->last_used_idx += vrh->completed;
540 vrh->completed = 0;
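
With event indices enabled, __vringh_need_notify() reads used_event and notifies only when it falls inside the window of freshly completed entries; without them it checks VRING_AVAIL_F_NO_INTERRUPT. The window test is the standard vring_need_event() arithmetic from include/uapi/linux/virtio_ring.h, which reduces to two wrapping 16-bit subtractions:

    #include <stdint.h>
    #include <stdbool.h>

    /* Notify iff event_idx lies in the half-open window (old_idx, new_idx].
     * Unsigned 16-bit wrap-around makes this a single comparison. */
    static bool need_event(uint16_t event_idx, uint16_t new_idx,
                           uint16_t old_idx)
    {
        return (uint16_t)(new_idx - event_idx - 1) <
               (uint16_t)(new_idx - old_idx);
    }

Called here as need_event(used_event, last_used_idx + completed, last_used_idx), matching the arguments at lines 536-537 above.
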
544 static inline bool __vringh_notify_enable(struct vringh *vrh,
545 int (*getu16)(const struct vringh *vrh,
547 int (*putu16)(const struct vringh *vrh,
552 if (!vrh->event_indices) {
554 if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
556 &vrh->vring.used->flags);
560 if (putu16(vrh, &vring_avail_event(&vrh->vring),
561 vrh->last_avail_idx) != 0) {
563 &vring_avail_event(&vrh->vring));
570 virtio_mb(vrh->weak_barriers);
572 if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
574 &vrh->vring.avail->idx);
581 return avail == vrh->last_avail_idx;
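
__vringh_notify_enable() closes the classic lost-wakeup race: first publish interest (clear the NO_INTERRUPT flag, or write avail_event = last_avail_idx), then a full barrier, then re-read avail->idx; it returns true only if no buffer slipped in meanwhile. A sketch of the event-index branch (illustrative names; seq-cst atomics stand in for virtio_mb()):

    #include <stdint.h>
    #include <stdbool.h>

    struct ring_state {
        uint16_t *avail_event;       /* where we publish the index we wait on */
        const uint16_t *avail_idx;   /* driver-written available index */
        uint16_t last_avail;
    };

    /* Returns true when it is safe to sleep: nothing raced in while enabling. */
    static bool notify_enable(struct ring_state *r)
    {
        __atomic_store_n(r->avail_event, r->last_avail, __ATOMIC_SEQ_CST);
        return __atomic_load_n(r->avail_idx, __ATOMIC_SEQ_CST)
               == r->last_avail;
    }
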
584 static inline void __vringh_notify_disable(struct vringh *vrh,
585 int (*putu16)(const struct vringh *vrh,
588 if (!vrh->event_indices) {
590 if (putu16(vrh, &vrh->vring.used->flags,
593 &vrh->vring.used->flags);
599 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
603 *val = vringh16_to_cpu(vrh, v);
607 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
609 __virtio16 v = cpu_to_vringh16(vrh, val);
613 static inline int copydesc_user(const struct vringh *vrh,
620 static inline int putused_user(const struct vringh *vrh,
629 static inline int xfer_from_user(const struct vringh *vrh, void *src,
636 static inline int xfer_to_user(const struct vringh *vrh,
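
The *_user accessors above are thin wrappers: get_user/put_user/copy_{from,to}_user plus virtio endian conversion via vringh16_to_cpu() and friends. Their shape, as a hedged kernel-context sketch (not the exact vringh.c bodies):

    #include <linux/uaccess.h>
    #include <linux/vringh.h>

    static inline int getu16_user_sketch(const struct vringh *vrh, u16 *val,
                                         const __virtio16 __user *p)
    {
        __virtio16 v = 0;
        int rc = get_user(v, p);        /* 0 on success, -EFAULT on fault */

        if (!rc)
            *val = vringh16_to_cpu(vrh, v);  /* honours VIRTIO_F_VERSION_1 */
        return rc;
    }
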
645 * @vrh: the vringh to initialize.
656 int vringh_init_user(struct vringh *vrh, u64 features,
668 vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
669 vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
670 vrh->weak_barriers = weak_barriers;
671 vrh->completed = 0;
672 vrh->last_avail_idx = 0;
673 vrh->last_used_idx = 0;
674 vrh->vring.num = num;
676 vrh->vring.desc = (__force struct vring_desc *)desc;
677 vrh->vring.avail = (__force struct vring_avail *)avail;
678 vrh->vring.used = (__force struct vring_used *)used;
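
vringh_init_user() only records state: the feature bits select endianness (VIRTIO_F_VERSION_1) and event indices (VIRTIO_RING_F_EVENT_IDX), and the three ring pointers are stashed with __force casts. A hypothetical setup call, written against the older struct-pointer spelling of the prototype (recent kernels declare these parameters as vring_desc_t __user * and so on):

    #include <linux/vringh.h>
    #include <uapi/linux/virtio_config.h>
    #include <uapi/linux/virtio_ring.h>

    static int setup_user_ring(struct vringh *vrh, void __user *desc,
                               void __user *avail, void __user *used)
    {
        u64 features = (1ULL << VIRTIO_F_VERSION_1) |
                       (1ULL << VIRTIO_RING_F_EVENT_IDX);

        /* 256 entries; true = the other side uses weak memory barriers */
        return vringh_init_user(vrh, features, 256, true,
                                (struct vring_desc __user *)desc,
                                (struct vring_avail __user *)avail,
                                (struct vring_used __user *)used);
    }
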
685 * @vrh: the userspace vring.
695 * *head will be vrh->vring.num. You may be able to ignore an invalid
703 int vringh_getdesc_user(struct vringh *vrh,
706 bool (*getrange)(struct vringh *vrh,
712 *head = vrh->vring.num;
713 err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
718 if (err == vrh->vring.num)
742 err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
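
Putting vringh_getdesc_user() to work needs a getrange callback that validates guest addresses and supplies a translation offset (struct vringh_range: start, end_incl, offset). A hypothetical caller accepting one flat, identity-mapped region:

    #include <linux/vringh.h>

    /* Illustrative: accept a single 1 GiB guest region mapped 1:1; real
     * users check against their actual memory map. */
    static bool my_getrange(struct vringh *vrh, u64 addr,
                            struct vringh_range *r)
    {
        r->start = 0;
        r->end_incl = (1ULL << 30) - 1;
        r->offset = 0;               /* guest address == user address */
        return addr <= r->end_incl;
    }

    /* Returns 1 with *head set, 0 if the ring is empty, or -errno. */
    static int fetch_one(struct vringh *vrh, struct vringh_iov *riov,
                         struct vringh_iov *wiov, u16 *head)
    {
        return vringh_getdesc_user(vrh, riov, wiov, my_getrange, head);
    }
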
785 * @vrh: the vring.
791 void vringh_abandon_user(struct vringh *vrh, unsigned int num)
795 vrh->last_avail_idx -= num;
801 * @vrh: the vring.
808 int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
812 used.id = cpu_to_vringh32(vrh, head);
813 used.len = cpu_to_vringh32(vrh, len);
814 return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
820 * @vrh: the vring.
827 int vringh_complete_multi_user(struct vringh *vrh,
831 return __vringh_complete(vrh, used, num_used,
838 * @vrh: the vring.
843 bool vringh_notify_enable_user(struct vringh *vrh)
845 return __vringh_notify_enable(vrh, getu16_user, putu16_user);
851 * @vrh: the vring.
856 void vringh_notify_disable_user(struct vringh *vrh)
858 __vringh_notify_disable(vrh, putu16_user);
864 * @vrh: the vring we've called vringh_complete_user() on.
868 int vringh_need_notify_user(struct vringh *vrh)
870 return __vringh_need_notify(vrh, getu16_user);
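
A typical completion tail for the user variant ties these calls together: publish the used entry, then ask whether the other side wants a notification. A sketch with the kick mechanism left abstract (vhost-style users signal an eventfd here):

    #include <linux/vringh.h>

    static int finish_request(struct vringh *vrh, u16 head, u32 len,
                              void (*kick)(void *), void *kick_arg)
    {
        int rc = vringh_complete_user(vrh, head, len);

        if (rc)
            return rc;
        rc = vringh_need_notify_user(vrh);
        if (rc < 0)
            return rc;
        if (rc)                  /* > 0: a notification is wanted */
            kick(kick_arg);
        return 0;
    }
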
875 static inline int getu16_kern(const struct vringh *vrh,
878 *val = vringh16_to_cpu(vrh, READ_ONCE(*p));
882 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
884 WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
888 static inline int copydesc_kern(const struct vringh *vrh,
895 static inline int putused_kern(const struct vringh *vrh,
904 static inline int xfer_kern(const struct vringh *vrh, void *src,
911 static inline int kern_xfer(const struct vringh *vrh, void *dst,
920 * @vrh: the vringh to initialize.
930 int vringh_init_kern(struct vringh *vrh, u64 features,
942 vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
943 vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
944 vrh->weak_barriers = weak_barriers;
945 vrh->completed = 0;
946 vrh->last_avail_idx = 0;
947 vrh->last_used_idx = 0;
948 vrh->vring.num = num;
949 vrh->vring.desc = desc;
950 vrh->vring.avail = avail;
951 vrh->vring.used = used;
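
The kernel-space variant is initialized the same way, minus the __force casts. A hypothetical setup over one contiguous allocation, laid out with vring_init()/vring_size() from include/uapi/linux/virtio_ring.h:

    #include <linux/slab.h>
    #include <linux/vringh.h>
    #include <uapi/linux/virtio_config.h>
    #include <uapi/linux/virtio_ring.h>

    static int setup_kern_ring(struct vringh *vrh, unsigned int num)
    {
        struct vring vring;
        void *mem = kzalloc(vring_size(num, PAGE_SIZE), GFP_KERNEL);

        if (!mem)
            return -ENOMEM;
        vring_init(&vring, num, mem, PAGE_SIZE);
        return vringh_init_kern(vrh, 1ULL << VIRTIO_F_VERSION_1, num,
                                true, vring.desc, vring.avail, vring.used);
    }
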
958 * @vrh: the kernelspace vring.
968 * *head will be vrh->vring.num. You may be able to ignore an invalid
976 int vringh_getdesc_kern(struct vringh *vrh,
984 err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
989 if (err == vrh->vring.num)
993 err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
1033 * @vrh: the vring.
1039 void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
1043 vrh->last_avail_idx -= num;
1049 * @vrh: the vring.
1056 int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
1060 used.id = cpu_to_vringh32(vrh, head);
1061 used.len = cpu_to_vringh32(vrh, len);
1063 return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
1069 * @vrh: the vring.
1074 bool vringh_notify_enable_kern(struct vringh *vrh)
1076 return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
1082 * @vrh: the vring.
1087 void vringh_notify_disable_kern(struct vringh *vrh)
1089 __vringh_notify_disable(vrh, putu16_kern);
1095 * @vrh: the vring we've called vringh_complete_kern() on.
1099 int vringh_need_notify_kern(struct vringh *vrh)
1101 return __vringh_need_notify(vrh, getu16_kern);
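
A minimal service loop for the kernel variant, tying the *_kern calls above together: fetch one chain, drain its readable side, publish the used entry. A sketch with error handling trimmed (note that vringh_iov_pull_kern() takes the kiov directly, without the vringh):

    #include <linux/vringh.h>

    static int service_one(struct vringh *vrh, struct vringh_kiov *riov,
                           struct vringh_kiov *wiov, void *buf, size_t buflen)
    {
        u16 head;
        ssize_t got;
        int rc = vringh_getdesc_kern(vrh, riov, wiov, &head, GFP_KERNEL);

        if (rc <= 0)             /* 0: ring empty, < 0: error */
            return rc;

        got = vringh_iov_pull_kern(riov, buf, buflen);
        if (got < 0)
            return got;

        return vringh_complete_kern(vrh, head, got);
    }
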
1115 static int iotlb_translate(const struct vringh *vrh,
1120 struct vhost_iotlb *iotlb = vrh->iotlb;
1124 spin_lock(vrh->iotlb_lock);
1149 if (vrh->use_va) {
1167 spin_unlock(vrh->iotlb_lock);
1177 static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
1195 ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
1203 if (vrh->use_va) {
1223 static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
1241 ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
1249 if (vrh->use_va) {
1269 static inline int getu16_iotlb(const struct vringh *vrh,
1284 ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
1289 if (vrh->use_va) {
1301 *val = vringh16_to_cpu(vrh, tmp);
1306 static inline int putu16_iotlb(const struct vringh *vrh,
1321 ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
1326 tmp = cpu_to_vringh16(vrh, val);
1328 if (vrh->use_va) {
1343 static inline int copydesc_iotlb(const struct vringh *vrh,
1348 ret = copy_from_iotlb(vrh, dst, (void *)src, len);
1355 static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
1360 ret = copy_from_iotlb(vrh, dst, src, len);
1367 static inline int xfer_to_iotlb(const struct vringh *vrh,
1372 ret = copy_to_iotlb(vrh, dst, src, len);
1379 static inline int putused_iotlb(const struct vringh *vrh,
1387 ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst));
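
iotlb_translate() resolves a guest address range through the vhost IOTLB under iotlb_lock, emitting one iovec or bio_vec segment per mapping it crosses (host virtual addresses when use_va is set, pages otherwise). The lookup idea, reduced to a schematic linear scan (the kernel uses the interval tree inside struct vhost_iotlb):

    #include <stddef.h>
    #include <stdint.h>

    struct map { uint64_t start, last, host; };    /* inclusive range */

    static int translate(const struct map *maps, size_t n,
                         uint64_t addr, uint64_t len,
                         void (*emit)(uint64_t host, uint64_t span))
    {
        while (len) {
            const struct map *m = NULL;
            size_t i;

            for (i = 0; i < n; i++) {
                if (addr >= maps[i].start && addr <= maps[i].last) {
                    m = &maps[i];
                    break;
                }
            }
            if (!m)
                return -1;                 /* unmapped: access fails */

            uint64_t span = m->last - addr + 1;
            if (span > len)
                span = len;
            emit(m->host + (addr - m->start), span);
            addr += span;
            len -= span;
        }
        return 0;
    }
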
1396 * @vrh: the vringh to initialize.
1406 int vringh_init_iotlb(struct vringh *vrh, u64 features,
1412 vrh->use_va = false;
1414 return vringh_init_kern(vrh, features, num, weak_barriers,
1422 * @vrh: the vringh to initialize.
1432 int vringh_init_iotlb_va(struct vringh *vrh, u64 features,
1438 vrh->use_va = true;
1440 return vringh_init_kern(vrh, features, num, weak_barriers,
1447 * @vrh: the vring
1451 void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
1454 vrh->iotlb = iotlb;
1455 vrh->iotlb_lock = iotlb_lock;
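
A vDPA-style user initializes the ring like the kernel variant and then binds the IOTLB plus its lock, which every subsequent ring access goes through. A hypothetical setup pairing vringh_init_iotlb() with vringh_set_iotlb():

    #include <linux/vhost_iotlb.h>
    #include <linux/vringh.h>
    #include <uapi/linux/virtio_config.h>

    static int setup_iotlb_ring(struct vringh *vrh, struct vhost_iotlb *iotlb,
                                spinlock_t *lock, unsigned int num,
                                struct vring_desc *desc,
                                struct vring_avail *avail,
                                struct vring_used *used)
    {
        int rc = vringh_init_iotlb(vrh, 1ULL << VIRTIO_F_VERSION_1, num,
                                   true, desc, avail, used);
        if (rc)
            return rc;
        vringh_set_iotlb(vrh, iotlb, lock);
        return 0;
    }
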
1462 * @vrh: the kernelspace vring.
1472 * *head will be vrh->vring.num. You may be able to ignore an invalid
1480 int vringh_getdesc_iotlb(struct vringh *vrh,
1488 err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
1493 if (err == vrh->vring.num)
1497 err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
1508 * @vrh: the vring.
1515 ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
1519 return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
1525 * @vrh: the vring.
1532 ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
1536 return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
1542 * @vrh: the vring.
1548 void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
1553 vrh->last_avail_idx -= num;
1559 * @vrh: the vring.
1566 int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
1570 used.id = cpu_to_vringh32(vrh, head);
1571 used.len = cpu_to_vringh32(vrh, len);
1573 return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
1579 * @vrh: the vring.
1584 bool vringh_notify_enable_iotlb(struct vringh *vrh)
1586 return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
1592 * @vrh: the vring.
1597 void vringh_notify_disable_iotlb(struct vringh *vrh)
1599 __vringh_notify_disable(vrh, putu16_iotlb);
1605 * @vrh: the vring we've called vringh_complete_iotlb() on.
1609 int vringh_need_notify_iotlb(struct vringh *vrh)
1611 return __vringh_need_notify(vrh, getu16_iotlb);
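
End to end, the IOTLB variant follows the same rhythm as the others: fetch a chain, pull the request, push the response, complete, and kick if the driver asked for it. A final sketch (buffers and the kick hook are illustrative; GFP_ATOMIC mirrors vdpa_sim-style callers running under a spinlock):

    #include <linux/vringh.h>

    static int service_iotlb(struct vringh *vrh, struct vringh_kiov *riov,
                             struct vringh_kiov *wiov,
                             void *req, size_t req_sz,
                             const void *resp, size_t resp_sz,
                             void (*kick)(void *), void *kick_arg)
    {
        u16 head;
        int rc = vringh_getdesc_iotlb(vrh, riov, wiov, &head, GFP_ATOMIC);

        if (rc <= 0)                 /* 0: nothing queued, < 0: error */
            return rc;
        if (vringh_iov_pull_iotlb(vrh, riov, req, req_sz) < 0)
            return -EFAULT;
        if (vringh_iov_push_iotlb(vrh, wiov, resp, resp_sz) < 0)
            return -EFAULT;
        rc = vringh_complete_iotlb(vrh, head, (u32)resp_sz);
        if (!rc && vringh_need_notify_iotlb(vrh) > 0)
            kick(kick_arg);
        return rc;
    }
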