Lines Matching refs:vring

120 			struct vring vring;
147 } vring;
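
The two hits above (file lines 120 and 147) are the split ring's embedded struct vring; the excerpts throughout this listing appear to come from drivers/virtio/virtio_ring.c. For reference, the layout of struct vring as defined in include/uapi/linux/virtio_ring.h:

    /* The split virtqueue: one descriptor table plus two index rings. */
    struct vring {
        unsigned int num;          /* ring size, always a power of two */
        struct vring_desc *desc;   /* descriptor table */
        struct vring_avail *avail; /* driver -> device ring */
        struct vring_used *used;   /* device -> driver ring */
    };
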
316 * making all of the arch DMA ops work on the vring device itself
451 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
462 desc = vq->split.vring.desc;
518 vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
520 vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
523 vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
533 vq->split.vring.desc[head].next);
546 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
547 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
553 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
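
File lines 546-553 publish a descriptor chain to the device. A minimal sketch of the sequence, following the excerpts above (the virtio_wmb() placement matches the surrounding mainline code): the shadow index is masked by num - 1, the chain head is written into the available ring slot, and only then is avail->idx advanced so the device never sees a half-written entry.

    u16 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);

    vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

    /* Descriptors and the ring entry must be visible before the index. */
    virtio_wmb(vq->weak_barriers);
    vq->split.avail_idx_shadow++;
    vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
                                                 vq->split.avail_idx_shadow);
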
609 vring_avail_event(&vq->split.vring)),
612 needs_kick = !(vq->split.vring.used->flags &
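
File lines 609-612 choose between the two kick-suppression schemes: with VIRTIO_RING_F_EVENT_IDX negotiated, the driver compares the device's avail event index against the old and new available indices; otherwise it just checks VRING_USED_F_NO_NOTIFY in used->flags. The comparison helper, from include/uapi/linux/virtio_ring.h, relies on unsigned 16-bit wraparound:

    /* True if the device asked to be kicked between old and new_idx. */
    static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
    {
        return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
    }
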
632 while (vq->split.vring.desc[i].flags & nextflag) {
633 vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
634 i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
638 vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
639 vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
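
File lines 632-639 free a chain in detach_buf: while VRING_DESC_F_NEXT is set, each descriptor is unmapped and the walk follows desc[i].next; the freed chain is then spliced onto the head of the free list. A sketch, assuming the vring_unmap_one_split() helper seen in the excerpts:

    __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
    unsigned int i = head;

    while (vq->split.vring.desc[i].flags & nextflag) {
        vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
        i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
        vq->vq.num_free++;
    }
    vring_unmap_one_split(vq, &vq->split.vring.desc[i]);

    /* Splice the whole chain back onto the free list. */
    vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
    vq->free_head = head;
    vq->vq.num_free++; /* count the final descriptor too */
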
656 vq->split.vring.desc[head].len);
658 BUG_ON(!(vq->split.vring.desc[head].flags &
675 vq->split.vring.used->idx);
703 last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
705 vq->split.vring.used->ring[last_used].id);
707 vq->split.vring.used->ring[last_used].len);
709 if (unlikely(i >= vq->split.vring.num)) {
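
File lines 703-709 consume one used element in get_buf: the used index is masked by num - 1, the element's id and len are read, and an out-of-range id means the device corrupted the ring. A sketch of that step:

    u16 last_used = vq->last_used_idx & (vq->split.vring.num - 1);
    u32 id  = virtio32_to_cpu(_vq->vdev, vq->split.vring.used->ring[last_used].id);
    u32 len = virtio32_to_cpu(_vq->vdev, vq->split.vring.used->ring[last_used].len);

    if (unlikely(id >= vq->split.vring.num)) {
        /* mainline calls BAD_RING() here and bails out */
        return NULL;
    }
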
727 &vring_used_event(&vq->split.vring),
743 vq->split.vring.avail->flags =
764 vq->split.vring.avail->flags =
768 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
779 vq->split.vring.used->idx);
797 vq->split.vring.avail->flags =
805 &vring_used_event(&vq->split.vring),
808 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
826 for (i = 0; i < vq->split.vring.num; i++) {
833 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
839 BUG_ON(vq->vq.num_free != vq->split.vring.num);
861 struct vring vring;
892 vring_init(&vring, num, queue, vring_align);
894 vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
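
File lines 861-894 are the legacy vring_new_virtqueue() path: the caller hands in contiguous pages and vring_init() derives the three parts at fixed offsets, with the used ring rounded up to the given alignment. Its definition, from include/uapi/linux/virtio_ring.h (modulo small differences between kernel versions):

    static inline void vring_init(struct vring *vr, unsigned int num, void *p,
                                  unsigned long align)
    {
        vr->num = num;
        vr->desc = p;
        vr->avail = (struct vring_avail *)((char *)p
                        + num * sizeof(struct vring_desc));
        /* used ring starts after avail->ring[num] and the used_event word,
         * rounded up to align */
        vr->used = (void *)(((uintptr_t)&vr->avail->ring[num]
                        + sizeof(__virtio16) + align - 1) & ~(align - 1));
    }
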
1007 BUG_ON(id == vq->packed.vring.num);
1031 vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
1032 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
1034 vq->packed.vring.desc[head].id = cpu_to_le16(id);
1050 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
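
File lines 1031-1050 fill in one packed-ring descriptor for an indirect table. Unlike the split ring, a packed descriptor carries its id inline and its avail/used state in the flags word. The layout, from include/uapi/linux/virtio_ring.h:

    struct vring_packed_desc {
        __le64 addr;  /* buffer address */
        __le32 len;   /* buffer length */
        __le16 id;    /* buffer id, echoed back by the device */
        __le16 flags; /* AVAIL/USED wrap bits plus NEXT/WRITE/INDIRECT */
    };
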
1058 if (n >= vq->packed.vring.num) {
1138 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
1140 desc = vq->packed.vring.desc;
1152 BUG_ON(id == vq->packed.vring.num);
1184 if ((unlikely(++i >= vq->packed.vring.num))) {
1215 vq->packed.vring.desc[head].flags = head_flags;
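
File line 1215 stores the head descriptor's flags last, because those flags carry the avail/used bits that make the whole chain visible to the device. Mainline places a write barrier immediately before this store; a sketch:

    /* The head must not appear available before the rest of the chain
     * is written out. */
    virtio_wmb(vq->weak_barriers);
    vq->packed.vring.desc[head].flags = head_flags;
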
1234 if (i >= vq->packed.vring.num)
1267 snapshot.u32 = *(u32 *)vq->packed.vring.device;
1283 event_idx -= vq->packed.vring.num;
1343 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
1381 id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
1382 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
1384 if (unlikely(id >= vq->packed.vring.num)) {
1398 if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
1399 vq->last_used_idx -= vq->packed.vring.num;
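
File lines 1398-1399 handle used-side wraparound. A packed ring has no free-running index; last_used_idx wraps at num, and a one-bit wrap counter flips on each pass so it can be matched against the avail/used bits in each descriptor's flags. A sketch (mainline advances by the chain length held in its per-descriptor state, not by a fixed amount):

    vq->last_used_idx += chain_len; /* chain_len is illustrative */
    if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
        vq->last_used_idx -= vq->packed.vring.num;
        vq->packed.used_wrap_counter ^= 1;
    }
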
1410 &vq->packed.vring.driver->off_wrap,
1427 vq->packed.vring.driver->flags =
1444 vq->packed.vring.driver->off_wrap =
1459 vq->packed.vring.driver->flags =
1495 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
1499 if (used_idx >= vq->packed.vring.num) {
1500 used_idx -= vq->packed.vring.num;
1504 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
1518 vq->packed.vring.driver->flags =
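
File lines 1495-1518 arm a delayed callback: the threshold is set to 3/4 of the outstanding buffers, and the resulting position is encoded into driver->off_wrap with the descriptor offset in bits 0-14 and the expected wrap counter in bit 15, before the flags store switches the device to descriptor-based notification. A sketch of the encoding:

    u16 used_idx = vq->last_used_idx + bufs;
    u16 wrap_counter = vq->packed.used_wrap_counter;

    if (used_idx >= vq->packed.vring.num) {
        used_idx -= vq->packed.vring.num;
        wrap_counter ^= 1;
    }

    vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
            (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
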
1547 for (i = 0; i < vq->packed.vring.num; i++) {
1557 BUG_ON(vq->vq.num_free != vq->packed.vring.num);
1640 vq->packed.vring.num = num;
1641 vq->packed.vring.desc = ring;
1642 vq->packed.vring.driver = driver;
1643 vq->packed.vring.device = device;
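
File lines 1640-1643 wire up the packed ring's three memory areas. The driver and device areas are each a single event-suppression structure, defined in include/uapi/linux/virtio_ring.h:

    struct vring_packed_desc_event {
        __le16 off_wrap; /* notification offset and wrap counter */
        __le16 flags;    /* ENABLE / DISABLE / DESC */
    };
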
1677 vq->packed.vring.driver->flags =
2069 struct vring vring,
2091 vq->vq.num_free = vring.num;
2115 vq->split.vring = vring;
2123 vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
2127 vq->split.desc_state = kmalloc_array(vring.num,
2136 for (i = 0; i < vring.num-1; i++)
2137 vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
2138 memset(vq->split.desc_state, 0, vring.num *
2184 struct vring vring;
2189 vring_init(&vring, num, pages, vring_align);
2190 return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
2203 vq->packed.vring.desc,
2208 vq->packed.vring.driver,
2213 vq->packed.vring.device,
2221 vq->split.vring.desc,
2262 * virtqueue_get_vring_size - return the size of the virtqueue's vring
2263 * @_vq: the struct virtqueue containing the vring of interest.
2265 * Returns the size of the vring. This is mainly used for boasting to
2273 return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
2327 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
2341 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
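
File lines 2327 and 2341 express the avail and used ring addresses as byte offsets from the descriptor table, which works because vring_init() (and the DMA allocation path) lay the three parts out contiguously. A sketch of the avail case, assuming the ring's base bus address lives in vq->split.queue_dma_addr as in mainline:

    return vq->split.queue_dma_addr +
           ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
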
2346 const struct vring *virtqueue_get_vring(struct virtqueue *vq)
2348 return &to_vvq(vq)->split.vring;
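
File lines 2262-2348 round out the small accessor surface exposed to the rest of the kernel. A hypothetical caller, sizing per-buffer bookkeeping to the ring (struct my_token is illustrative, not from the source):

    unsigned int num = virtqueue_get_vring_size(vq);
    struct my_token *tokens = kcalloc(num, sizeof(*tokens), GFP_KERNEL);
    if (!tokens)
        return -ENOMEM;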