Lines Matching refs:vdev

22 dev_err(&(_vq)->vq.vdev->dev,			\
59 dev_err(&_vq->vq.vdev->dev, \
228 struct virtio_device *vdev,
280 static bool vring_use_dma_api(const struct virtio_device *vdev)
282 if (!virtio_has_dma_quirk(vdev))
300 size_t virtio_max_dma_size(const struct virtio_device *vdev)
304 if (vring_use_dma_api(vdev))
305 max_segment_size = dma_max_mapping_size(vdev->dev.parent);
311 static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
315 if (vring_use_dma_api(vdev)) {
345 static void vring_free_queue(struct virtio_device *vdev, size_t size,
349 if (vring_use_dma_api(vdev))
451 flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
454 virtio64_to_cpu(vq->vq.vdev, desc->addr),
455 virtio32_to_cpu(vq->vq.vdev, desc->len),
511 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
527 desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
528 desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
529 desc[i].len = cpu_to_virtio32(vq->vdev, len);
533 desc[i].next = cpu_to_virtio16(vq->vdev, next);
539 next = virtio16_to_cpu(vq->vdev, desc[i].next);
645 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
688 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
694 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
721 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
753 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
758 cpu_to_virtio16(_vq->vdev,
769 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
819 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
849 i = virtio32_to_cpu(_vq->vdev,
851 *len = virtio32_to_cpu(_vq->vdev,
873 cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
900 cpu_to_virtio16(_vq->vdev,
921 cpu_to_virtio16(_vq->vdev,
924 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
934 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
954 cpu_to_virtio16(_vq->vdev,
962 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
964 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
989 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
1004 struct virtio_device *vdev;
1006 vdev = vq->vq.vdev;
1015 vring_split->vring.avail->flags = cpu_to_virtio16(vdev,
1079 struct virtio_device *vdev, struct device *dma_dev)
1081 vring_free_queue(vdev, vring_split->queue_size_in_bytes,
1091 struct virtio_device *vdev,
1102 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
1108 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
1123 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
1145 struct virtio_device *vdev,
1158 err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
1163 vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
1166 vring_free_split(&vring_split, vdev, dma_dev);
1179 struct virtio_device *vdev = _vq->vdev;
1182 err = vring_alloc_queue_split(&vring_split, vdev, num,
1203 vring_free_split(&vring_split, vdev, vring_dma_dev(vq));
1910 struct virtio_device *vdev,
1914 vring_free_queue(vdev, vring_packed->ring_size_in_bytes,
1920 vring_free_queue(vdev, vring_packed->event_size_in_bytes,
1926 vring_free_queue(vdev, vring_packed->event_size_in_bytes,
1936 struct virtio_device *vdev,
1946 ring = vring_alloc_queue(vdev, ring_size_in_bytes,
1959 driver = vring_alloc_queue(vdev, event_size_in_bytes,
1970 device = vring_alloc_queue(vdev, event_size_in_bytes,
1985 vring_free_packed(vring_packed, vdev, dma_dev);
2057 struct virtio_device *vdev,
2070 if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev))
2078 vq->vq.vdev = vdev;
2092 vq->use_dma_api = vring_use_dma_api(vdev);
2096 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
2098 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
2100 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
2112 spin_lock(&vdev->vqs_list_lock);
2113 list_add_tail(&vq->vq.list, &vdev->vqs);
2114 spin_unlock(&vdev->vqs_list_lock);
2120 vring_free_packed(&vring_packed, vdev, dma_dev);
2129 struct virtio_device *vdev = _vq->vdev;
2132 if (vring_alloc_queue_packed(&vring_packed, vdev, num, vring_dma_dev(vq)))
2149 vring_free_packed(&vring_packed, vdev, vring_dma_dev(vq));
2159 struct virtio_device *vdev = vq->vq.vdev;
2166 if (!vdev->config->disable_vq_and_reset)
2169 if (!vdev->config->enable_vq_after_reset)
2172 err = vdev->config->disable_vq_and_reset(_vq);
2185 struct virtio_device *vdev = vq->vq.vdev;
2187 if (vdev->config->enable_vq_after_reset(_vq))
2581 dev_warn_once(&vq->vq.vdev->dev,
2604 struct virtio_device *vdev,
2615 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2624 vq->vq.vdev = vdev;
2637 vq->use_dma_api = vring_use_dma_api(vdev);
2641 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
2643 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
2645 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
2659 spin_lock(&vdev->vqs_list_lock);
2660 list_add_tail(&vq->vq.list, &vdev->vqs);
2661 spin_unlock(&vdev->vqs_list_lock);
2669 struct virtio_device *vdev,
2678 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2680 vdev, weak_barriers, may_reduce_num,
2681 context, notify, callback, name, vdev->dev.parent);
2684 vdev, weak_barriers, may_reduce_num,
2685 context, notify, callback, name, vdev->dev.parent);
2693 struct virtio_device *vdev,
2703 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2705 vdev, weak_barriers, may_reduce_num,
2709 vdev, weak_barriers, may_reduce_num,
2852 struct virtio_device *vdev,
2862 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2866 return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
2868 vdev->dev.parent);
2878 vring_free_queue(vq->vq.vdev,
2884 vring_free_queue(vq->vq.vdev,
2890 vring_free_queue(vq->vq.vdev,
2899 vring_free_queue(vq->vq.vdev,
2916 spin_lock(&vq->vq.vdev->vqs_list_lock);
2918 spin_unlock(&vq->vq.vdev->vqs_list_lock);
2944 void vring_transport_features(struct virtio_device *vdev)
2966 __virtio_clear_bit(vdev, i);
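
Nearly every hit above passes the owning virtio_device into cpu_to_virtio16()/virtio16_to_cpu() (and the 32/64-bit variants) or into virtio_has_feature(): the device, not the CPU, decides the byte order and feature set used when the ring is touched. Below is a minimal userspace sketch of that conversion pattern only, not the kernel implementation; the toy_* names and the little_endian flag are hypothetical stand-ins for struct virtio_device and the real helpers, which decide endianness per device (modern VIRTIO_F_VERSION_1 devices are little-endian, legacy devices follow the guest).

/*
 * Minimal sketch of the per-device endianness pattern seen in the
 * listing above. All names here are illustrative, not kernel APIs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_virtio_device {
	bool little_endian;	/* true for modern (VIRTIO_F_VERSION_1) devices */
};

static uint16_t bswap16(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

static bool host_is_little_endian(void)
{
	const uint16_t probe = 1;

	return *(const uint8_t *)&probe == 1;
}

/* Mirrors cpu_to_virtio16(vdev, val): swap only if byte orders differ. */
static uint16_t toy_cpu_to_virtio16(const struct toy_virtio_device *vdev,
				    uint16_t val)
{
	if (vdev->little_endian == host_is_little_endian())
		return val;
	return bswap16(val);
}

/* Mirrors virtio16_to_cpu(vdev, val): the conversion is symmetric. */
static uint16_t toy_virtio16_to_cpu(const struct toy_virtio_device *vdev,
				    uint16_t val)
{
	return toy_cpu_to_virtio16(vdev, val);
}

int main(void)
{
	struct toy_virtio_device vdev = { .little_endian = true };
	uint16_t wire = toy_cpu_to_virtio16(&vdev, 0x1234);

	printf("cpu 0x1234 -> wire 0x%04x -> cpu 0x%04x\n",
	       wire, toy_virtio16_to_cpu(&vdev, wire));
	return 0;
}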