Lines matching defs:_vq (drivers/virtio/virtio_ring.c)

18 #define BAD_RING(_vq, fmt, args...)				\
20 dev_err(&(_vq)->vq.vdev->dev, \
21 "%s:"fmt, (_vq)->vq.name, ##args); \
25 #define START_USE(_vq) \
27 if ((_vq)->in_use) \
29 (_vq)->vq.name, (_vq)->in_use); \
30 (_vq)->in_use = __LINE__; \
32 #define END_USE(_vq) \
33 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
34 #define LAST_ADD_TIME_UPDATE(_vq) \
39 if ((_vq)->last_add_time_valid) \
41 (_vq)->last_add_time)) > 100); \
42 (_vq)->last_add_time = now; \
43 (_vq)->last_add_time_valid = true; \
45 #define LAST_ADD_TIME_CHECK(_vq) \
47 if ((_vq)->last_add_time_valid) { \
49 (_vq)->last_add_time)) > 100); \
52 #define LAST_ADD_TIME_INVALID(_vq) \
53 ((_vq)->last_add_time_valid = false)
55 #define BAD_RING(_vq, fmt, args...) \
57 dev_err(&_vq->vq.vdev->dev, \
58 "%s:"fmt, (_vq)->vq.name, ##args); \
59 (_vq)->broken = true; \
201 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
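
to_vvq() is the usual container_of() downcast: the public struct virtqueue handle is embedded as the member `vq` inside the private struct vring_virtqueue, so every exported virtqueue_*() entry point converts its argument this way before touching internal state such as `broken` or the ring layout. An abridged sketch of that relationship (field lists trimmed, not the full kernel definitions):

    struct virtqueue {                      /* handle shared with drivers */
            void (*callback)(struct virtqueue *vq);
            const char *name;
            struct virtio_device *vdev;
            /* ... */
    };

    struct vring_virtqueue {                /* private to virtio_ring.c */
            struct virtqueue vq;            /* public handle embedded here */
            bool broken;
            /* ... split/packed ring state, DEBUG bookkeeping ... */
    };

    #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
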
203 static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
206 struct vring_virtqueue *vq = to_vvq(_vq);
392 static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
411 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
415 static inline int virtqueue_add_split(struct virtqueue *_vq,
424 struct vring_virtqueue *vq = to_vvq(_vq);
447 if (virtqueue_use_indirect(_vq, total_sg))
448 desc = alloc_indirect_split(_vq, total_sg, gfp);
487 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
488 desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
489 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
491 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
500 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
501 desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
502 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
504 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
508 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
518 vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
520 vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
523 vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
532 vq->free_head = virtio16_to_cpu(_vq->vdev,
547 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
553 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
563 virtqueue_kick(_vq);
579 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
589 static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
591 struct vring_virtqueue *vq = to_vvq(_vq);
608 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
613 cpu_to_virtio16(_vq->vdev,
678 static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
682 struct vring_virtqueue *vq = to_vvq(_vq);
704 i = virtio32_to_cpu(_vq->vdev,
706 *len = virtio32_to_cpu(_vq->vdev,
728 cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
736 static void virtqueue_disable_cb_split(struct virtqueue *_vq)
738 struct vring_virtqueue *vq = to_vvq(_vq);
744 cpu_to_virtio16(_vq->vdev,
749 static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
751 struct vring_virtqueue *vq = to_vvq(_vq);
765 cpu_to_virtio16(_vq->vdev,
768 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
774 static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
776 struct vring_virtqueue *vq = to_vvq(_vq);
778 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
782 static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
784 struct vring_virtqueue *vq = to_vvq(_vq);
798 cpu_to_virtio16(_vq->vdev,
806 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
808 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
818 static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
820 struct vring_virtqueue *vq = to_vvq(_vq);
833 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
1093 static inline int virtqueue_add_packed(struct virtqueue *_vq,
1102 struct vring_virtqueue *vq = to_vvq(_vq);
1124 if (virtqueue_use_indirect(_vq, total_sg)) {
1242 static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
1244 struct vring_virtqueue *vq = to_vvq(_vq);
1356 static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
1360 struct vring_virtqueue *vq = to_vvq(_vq);
1421 static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
1423 struct vring_virtqueue *vq = to_vvq(_vq);
1432 static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
1434 struct vring_virtqueue *vq = to_vvq(_vq);
1468 static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
1470 struct vring_virtqueue *vq = to_vvq(_vq);
1480 static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
1482 struct vring_virtqueue *vq = to_vvq(_vq);
1539 static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
1541 struct vring_virtqueue *vq = to_vvq(_vq);
1705 static inline int virtqueue_add(struct virtqueue *_vq,
1714 struct vring_virtqueue *vq = to_vvq(_vq);
1716 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
1718 virtqueue_add_split(_vq, sgs, total_sg,
1724 * @_vq: the struct virtqueue we're talking about.
1736 int virtqueue_add_sgs(struct virtqueue *_vq,
1752 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
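
virtqueue_add_sgs() turns the sgs[] array into one descriptor chain: the first out_sgs entries are device-readable, the remaining in_sgs entries are device-writable, and `data` is the opaque token handed back later by virtqueue_get_buf(). A hedged caller sketch in the virtio-blk style; struct my_req, my_queue_req(), buf and len are made up for illustration:

    struct my_req {
            u8 hdr[16];             /* device-readable request header (illustrative) */
            u8 status;              /* device-writable completion status */
    };

    static int my_queue_req(struct virtqueue *vq, struct my_req *req,
                            void *buf, unsigned int len)
    {
            struct scatterlist hdr, data, status, *sgs[3];
            unsigned int num_out = 0, num_in = 0;
            int err;

            sg_init_one(&hdr, req->hdr, sizeof(req->hdr));
            sgs[num_out++] = &hdr;                     /* device-readable */

            sg_init_one(&data, buf, len);
            sgs[num_out + num_in++] = &data;           /* device-writable */

            sg_init_one(&status, &req->status, sizeof(req->status));
            sgs[num_out + num_in++] = &status;         /* device-writable */

            err = virtqueue_add_sgs(vq, sgs, num_out, num_in, req, GFP_ATOMIC);
            if (err < 0)
                    return err;     /* e.g. -ENOSPC when the ring is full */

            virtqueue_kick(vq);
            return 0;
    }
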
1827 * @_vq: the struct virtqueue
1836 bool virtqueue_kick_prepare(struct virtqueue *_vq)
1838 struct vring_virtqueue *vq = to_vvq(_vq);
1840 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
1841 virtqueue_kick_prepare_split(_vq);
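
virtqueue_kick_prepare() only decides whether a notification is actually needed (event-index / NO_NOTIFY suppression); the doorbell itself is rung by virtqueue_notify() through vq->notify(). Drivers use the two-step form when they want to drop a lock before the potentially slow notification; virtqueue_kick() is the combined helper. A sketch, assuming a made-up driver-private spinlock `lock`:

    unsigned long flags;
    bool kick;
    int err;

    spin_lock_irqsave(&lock, flags);
    err = virtqueue_add_sgs(vq, sgs, num_out, num_in, data, GFP_ATOMIC);
    kick = virtqueue_kick_prepare(vq);
    spin_unlock_irqrestore(&lock, flags);

    if (kick)
            virtqueue_notify(vq);   /* may trap to the host/hypervisor */
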
1847 * @_vq: the struct virtqueue
1853 bool virtqueue_notify(struct virtqueue *_vq)
1855 struct vring_virtqueue *vq = to_vvq(_vq);
1861 if (!vq->notify(_vq)) {
1891 * @_vq: the struct virtqueue we're talking about.
1906 void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
1909 struct vring_virtqueue *vq = to_vvq(_vq);
1911 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
1912 virtqueue_get_buf_ctx_split(_vq, len, ctx);
1916 void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
1918 return virtqueue_get_buf_ctx(_vq, len, NULL);
1923 * @_vq: the struct virtqueue we're talking about.
1930 void virtqueue_disable_cb(struct virtqueue *_vq)
1932 struct vring_virtqueue *vq = to_vvq(_vq);
1935 virtqueue_disable_cb_packed(_vq);
1937 virtqueue_disable_cb_split(_vq);
1943 * @_vq: the struct virtqueue we're talking about.
1953 unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
1955 struct vring_virtqueue *vq = to_vvq(_vq);
1957 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
1958 virtqueue_enable_cb_prepare_split(_vq);
1964 * @_vq: the struct virtqueue we're talking about.
1971 bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
1973 struct vring_virtqueue *vq = to_vvq(_vq);
1979 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
1980 virtqueue_poll_split(_vq, last_used_idx);
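
The prepare/poll pair lets a driver separate re-arming callbacks from the race check: virtqueue_enable_cb_prepare() re-arms and returns an opaque snapshot, and virtqueue_poll() later reports whether a used buffer has appeared since that snapshot. A minimal sketch of the pattern (my_try_rearm() is a made-up helper name):

    /* Returns true if callbacks are armed and nothing is pending,
     * false if the caller should keep polling the queue.
     */
    static bool my_try_rearm(struct virtqueue *vq)
    {
            unsigned opaque = virtqueue_enable_cb_prepare(vq);

            if (unlikely(virtqueue_poll(vq, opaque))) {
                    /* a used buffer arrived after the re-arm: undo it */
                    virtqueue_disable_cb(vq);
                    return false;
            }
            return true;
    }
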
1986 * @_vq: the struct virtqueue we're talking about.
1995 bool virtqueue_enable_cb(struct virtqueue *_vq)
1997 unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
1999 return !virtqueue_poll(_vq, last_used_idx);
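
As the body above shows, virtqueue_enable_cb() is just virtqueue_enable_cb_prepare() followed by a virtqueue_poll() check, so it returns false when a used buffer snuck in while callbacks were disabled. The classic race-free drain loop in a driver callback therefore looks like this sketch (handle_buf() is a made-up driver helper):

    static void my_vq_callback(struct virtqueue *vq)
    {
            unsigned int len;
            void *buf;

            do {
                    virtqueue_disable_cb(vq);
                    while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
                            handle_buf(buf, len);
            } while (!virtqueue_enable_cb(vq));
    }
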
2005 * @_vq: the struct virtqueue we're talking about.
2016 bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
2018 struct vring_virtqueue *vq = to_vvq(_vq);
2020 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
2021 virtqueue_enable_cb_delayed_split(_vq);
2027 * @_vq: the struct virtqueue we're talking about.
2033 void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
2035 struct vring_virtqueue *vq = to_vvq(_vq);
2037 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
2038 virtqueue_detach_unused_buf_split(_vq);
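
virtqueue_detach_unused_buf() is a teardown helper: once the device has been reset and can no longer touch the ring, any token that was added but never came back through the used ring can be reclaimed with it. A typical remove-path sketch (kfree() stands in for however the driver frees its tokens):

    void *buf;

    /* the device must already be reset at this point */
    while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
            kfree(buf);
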
2047 irqreturn_t vring_interrupt(int irq, void *_vq)
2049 struct vring_virtqueue *vq = to_vvq(_vq);
2195 void vring_del_virtqueue(struct virtqueue *_vq)
2197 struct vring_virtqueue *vq = to_vvq(_vq);
2228 list_del(&_vq->list);
2263 * @_vq: the struct virtqueue containing the vring of interest.
2268 unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
2271 struct vring_virtqueue *vq = to_vvq(_vq);
2277 bool virtqueue_is_broken(struct virtqueue *_vq)
2279 struct vring_virtqueue *vq = to_vvq(_vq);
2291 struct virtqueue *_vq;
2294 list_for_each_entry(_vq, &dev->vqs, list) {
2295 struct vring_virtqueue *vq = to_vvq(_vq);
2304 dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
2306 struct vring_virtqueue *vq = to_vvq(_vq);
2317 dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
2319 struct vring_virtqueue *vq = to_vvq(_vq);
2331 dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
2333 struct vring_virtqueue *vq = to_vvq(_vq);
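
These three accessors exist for transport code, which needs the DMA addresses of the ring areas to program them into the device. A hedged sketch of how a virtio-mmio style transport (version 2 registers from include/uapi/linux/virtio_mmio.h) might use them; `base` is assumed to be the mapped register window:

    u64 addr;

    addr = virtqueue_get_desc_addr(vq);
    writel((u32)addr, base + VIRTIO_MMIO_QUEUE_DESC_LOW);
    writel((u32)(addr >> 32), base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

    addr = virtqueue_get_avail_addr(vq);
    writel((u32)addr, base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
    writel((u32)(addr >> 32), base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

    addr = virtqueue_get_used_addr(vq);
    writel((u32)addr, base + VIRTIO_MMIO_QUEUE_USED_LOW);
    writel((u32)(addr >> 32), base + VIRTIO_MMIO_QUEUE_USED_HIGH);
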