Lines matching references to vdev in drivers/virtio/virtio_ring.c

20 		dev_err(&(_vq)->vq.vdev->dev,			\
57 dev_err(&_vq->vq.vdev->dev, \
241 static bool vring_use_dma_api(struct virtio_device *vdev)
243 if (!virtio_has_dma_quirk(vdev))
261 size_t virtio_max_dma_size(struct virtio_device *vdev)
265 if (vring_use_dma_api(vdev))
266 max_segment_size = dma_max_mapping_size(vdev->dev.parent);
272 static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
275 if (vring_use_dma_api(vdev)) {
276 return dma_alloc_coherent(vdev->dev.parent, size,
305 static void vring_free_queue(struct virtio_device *vdev, size_t size,
308 if (vring_use_dma_api(vdev))
309 dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
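
Lines 241-309 above are the DMA gating helpers: whether ring memory comes from the DMA API at all hinges on vring_use_dma_api(). Below is a minimal sketch of that pattern, assuming a current kernel; ring_alloc()/ring_free() are illustrative names, and the real vring_use_dma_api() additionally special-cases Xen guests.

#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

/*
 * Sketch of the gate at lines 272-309: with no DMA quirk, ring memory
 * is coherent DMA memory allocated against the transport device
 * (vdev->dev.parent); quirked/legacy devices get plain pages whose
 * guest-physical address doubles as the bus address.
 */
static void *ring_alloc(struct virtio_device *vdev, size_t size,
			dma_addr_t *dma_handle, gfp_t flag)
{
	void *queue;

	if (!virtio_has_dma_quirk(vdev))	/* roughly vring_use_dma_api() */
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);

	queue = alloc_pages_exact(size, flag);
	if (queue)
		*dma_handle = (dma_addr_t)virt_to_phys(queue);
	return queue;
}

static void ring_free(struct virtio_device *vdev, size_t size,
		      void *queue, dma_addr_t dma_handle)
{
	if (!virtio_has_dma_quirk(vdev))
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	else
		free_pages_exact(queue, size);
}
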
321 return vq->vq.vdev->dev.parent;
375 flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
379 virtio64_to_cpu(vq->vq.vdev, desc->addr),
380 virtio32_to_cpu(vq->vq.vdev, desc->len),
385 virtio64_to_cpu(vq->vq.vdev, desc->addr),
386 virtio32_to_cpu(vq->vq.vdev, desc->len),
411 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
487 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
488 desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
489 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
491 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
500 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
501 desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
502 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
504 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
508 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
518 vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
520 vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
523 vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
532 vq->free_head = virtio16_to_cpu(_vq->vdev,
547 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
553 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
579 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
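
From line 487 onward the matches fill split-ring descriptors, and every multi-byte field goes through the per-device byteorder helpers: legacy devices keep guest-native endianness, while VIRTIO_F_VERSION_1 devices are always little-endian. A small sketch of that accessor pattern follows; fill_desc() is hypothetical, not a virtio_ring.c function.

#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

/*
 * Sketch of the descriptor-fill pattern at lines 487-504: the caller
 * already holds a DMA address for the buffer; this only shows how the
 * fields are stored in device byte order via the vdev-aware helpers.
 */
static void fill_desc(struct virtio_device *vdev, struct vring_desc *desc,
		      dma_addr_t addr, u32 len, bool write, bool has_next,
		      u16 next)
{
	u16 flags = 0;

	if (write)
		flags |= VRING_DESC_F_WRITE;	/* device writes this buffer */
	if (has_next)
		flags |= VRING_DESC_F_NEXT;	/* chain continues at desc[next] */

	desc->addr  = cpu_to_virtio64(vdev, addr);
	desc->len   = cpu_to_virtio32(vdev, len);
	desc->flags = cpu_to_virtio16(vdev, flags);
	desc->next  = cpu_to_virtio16(vdev, next);
}
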
608 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
613 cpu_to_virtio16(_vq->vdev,
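
Line 608 combines vring_need_event() with the avail event index the device publishes, so the driver only notifies when the device has asked to be woken. Here is a sketch of that check, assuming the split-ring layout; ring_needs_kick() is an illustrative stand-in for the kick-prepare logic.

#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

/*
 * Sketch of the notification decision at lines 608-613: with
 * VIRTIO_RING_F_EVENT_IDX the device publishes the avail index it
 * wants a kick at; otherwise it just sets VRING_USED_F_NO_NOTIFY when
 * it does not care. old_idx/new_idx are avail->idx before and after
 * the buffers were added.
 */
static bool ring_needs_kick(struct virtio_device *vdev, struct vring *vr,
			    u16 old_idx, u16 new_idx, bool event_idx)
{
	if (!event_idx)
		return !(virtio16_to_cpu(vdev, vr->used->flags) &
			 VRING_USED_F_NO_NOTIFY);

	return vring_need_event(virtio16_to_cpu(vdev, vring_avail_event(vr)),
				new_idx, old_idx);
}
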
624 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
634 i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
639 vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
655 len = virtio32_to_cpu(vq->vq.vdev,
659 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
674 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
704 i = virtio32_to_cpu(_vq->vdev,
706 *len = virtio32_to_cpu(_vq->vdev,
728 cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
744 cpu_to_virtio16(_vq->vdev,
765 cpu_to_virtio16(_vq->vdev,
768 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
778 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
798 cpu_to_virtio16(_vq->vdev,
806 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
808 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
833 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
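
Lines 674-833 are the completion and callback-suppression side; the recurring check is simply the driver's private last_used_idx against the device-written used->idx, again read through the byteorder helper. A minimal sketch with an illustrative name:

#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

/* Sketch of the check at lines 674/778: any used entries not yet consumed? */
static bool ring_more_used(struct virtio_device *vdev, struct vring *vr,
			   u16 last_used_idx)
{
	return last_used_idx != virtio16_to_cpu(vdev, vr->used->idx);
}
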
849 struct virtio_device *vdev,
865 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
871 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
885 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
894 vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
897 vring_free_queue(vdev, queue_size_in_bytes, queue,
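
Lines 849-897 belong to split-ring creation, where the two vring_alloc_queue() calls (871 and 885) form an allocate-or-shrink loop: a large contiguous ring that cannot be allocated is retried with half as many entries, as long as may_reduce_num permits. A sketch of that loop, reusing the illustrative ring_alloc() helper from above:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/virtio.h>
#include <linux/virtio_ring.h>

/* Sketch of the shrinking allocation behind lines 865-897. */
static void *alloc_split_ring(struct virtio_device *vdev, unsigned int *num,
			      unsigned long vring_align, bool may_reduce_num,
			      dma_addr_t *dma_addr)
{
	/* Large rings: retry with half the entries until something fits. */
	for (; *num && vring_size(*num, vring_align) > PAGE_SIZE; *num /= 2) {
		void *queue = ring_alloc(vdev, vring_size(*num, vring_align),
					 dma_addr, GFP_KERNEL | __GFP_NOWARN);
		if (queue)
			return queue;
		if (!may_reduce_num)
			return NULL;
	}

	if (!*num)
		return NULL;

	/* Ring now fits in a page: one final, non-optional attempt. */
	return ring_alloc(vdev, vring_size(*num, vring_align),
			  dma_addr, GFP_KERNEL);
}
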
1567 struct virtio_device *vdev,
1584 ring = vring_alloc_queue(vdev, ring_size_in_bytes,
1592 driver = vring_alloc_queue(vdev, event_size_in_bytes,
1598 device = vring_alloc_queue(vdev, event_size_in_bytes,
1609 vq->vq.vdev = vdev;
1620 vq->use_dma_api = vring_use_dma_api(vdev);
1626 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
1628 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1630 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
1681 spin_lock(&vdev->vqs_list_lock);
1682 list_add_tail(&vq->vq.list, &vdev->vqs);
1683 spin_unlock(&vdev->vqs_list_lock);
1691 vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
1693 vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
1695 vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
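
The packed-ring constructor (1567-1695) needs three regions, the descriptor ring plus the driver and device event-suppression structures, and the error path at 1691-1695 frees them in reverse order. A sketch of that allocate-then-unwind shape, again using the illustrative ring_alloc()/ring_free() helpers and a made-up holder struct:

#include <linux/errno.h>
#include <linux/virtio.h>

struct packed_ring_mem {			/* illustrative holder */
	void *ring, *driver, *device;
	dma_addr_t ring_dma, driver_dma, device_dma;
};

/* Sketch of the three allocations and the unwind at lines 1584-1695. */
static int alloc_packed_ring_mem(struct virtio_device *vdev,
				 size_t ring_bytes, size_t event_bytes,
				 struct packed_ring_mem *m)
{
	m->ring = ring_alloc(vdev, ring_bytes, &m->ring_dma, GFP_KERNEL);
	if (!m->ring)
		return -ENOMEM;

	m->driver = ring_alloc(vdev, event_bytes, &m->driver_dma, GFP_KERNEL);
	if (!m->driver)
		goto err_driver;

	m->device = ring_alloc(vdev, event_bytes, &m->device_dma, GFP_KERNEL);
	if (!m->device)
		goto err_device;

	return 0;

err_device:
	ring_free(vdev, event_bytes, m->driver, m->driver_dma);
err_driver:
	ring_free(vdev, ring_bytes, m->ring, m->ring_dma);
	return -ENOMEM;
}
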
2070 struct virtio_device *vdev,
2080 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2089 vq->vq.vdev = vdev;
2099 vq->use_dma_api = vring_use_dma_api(vdev);
2105 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
2107 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
2109 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
2123 vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
2137 vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
2141 spin_lock(&vdev->vqs_list_lock);
2142 list_add_tail(&vq->vq.list, &vdev->vqs);
2143 spin_unlock(&vdev->vqs_list_lock);
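
In __vring_new_virtqueue (2070-2143) the negotiated feature bits on the virtio_device decide the queue's behaviour: indirect descriptors, event-index suppression, and whether weak barriers survive VIRTIO_F_ORDER_PLATFORM. A sketch of that feature-to-capability mapping; struct ring_caps is an illustrative stand-in for the corresponding vring_virtqueue fields.

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

struct ring_caps {				/* illustrative */
	bool use_dma_api;
	bool indirect;
	bool event_idx;
	bool weak_barriers;
};

/* Sketch of the feature checks at lines 2099-2109. */
static void ring_caps_from_features(struct virtio_device *vdev,
				    bool weak_barriers, struct ring_caps *caps)
{
	caps->use_dma_api = !virtio_has_dma_quirk(vdev);
	caps->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	caps->event_idx = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
	/* VIRTIO_F_ORDER_PLATFORM forces real barriers toward the device. */
	caps->weak_barriers = weak_barriers &&
			!virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM);
}
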
2152 struct virtio_device *vdev,
2161 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2163 vdev, weak_barriers, may_reduce_num,
2167 vdev, weak_barriers, may_reduce_num,
2176 struct virtio_device *vdev,
2186 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2190 return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
2201 vring_free_queue(vq->vq.vdev,
2206 vring_free_queue(vq->vq.vdev,
2211 vring_free_queue(vq->vq.vdev,
2219 vring_free_queue(vq->vq.vdev,
2227 spin_lock(&vq->vq.vdev->vqs_list_lock);
2229 spin_unlock(&vq->vq.vdev->vqs_list_lock);
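
vring_del_virtqueue (2201-2229) returns the ring memory through the same DMA-aware free path and unlinks the queue from vdev->vqs under vqs_list_lock, so transport code iterating the list never sees a half-torn-down queue. A simplified sketch; del_ring() stands in for the real teardown.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>

/* Sketch of the teardown at lines 2201-2229. */
static void del_ring(struct virtqueue *vq, void *queue, size_t bytes,
		     dma_addr_t dma, bool we_own_ring)
{
	struct virtio_device *vdev = vq->vdev;

	if (we_own_ring)			/* only free memory we allocated */
		ring_free(vdev, bytes, queue, dma);

	spin_lock(&vdev->vqs_list_lock);
	list_del(&vq->list);
	spin_unlock(&vdev->vqs_list_lock);
}
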
2235 void vring_transport_features(struct virtio_device *vdev)
2255 __virtio_clear_bit(vdev, i);
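
Finally, vring_transport_features (2235-2255) walks the transport feature range during negotiation and clears, via __virtio_clear_bit(), every bit the ring code does not understand. A sketch of that masking loop with only a subset of the accepted bits spelled out; mask_transport_features() is an illustrative name.

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

/* Sketch of the feature-masking loop at lines 2235-2255 (subset of cases). */
static void mask_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
		case VIRTIO_RING_F_EVENT_IDX:
		case VIRTIO_F_VERSION_1:
			break;			/* understood: keep the bit */
		default:
			__virtio_clear_bit(vdev, i);	/* unknown: refuse */
		}
	}
}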