18c2ecf20Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0-or-later
28c2ecf20Sopenharmony_ci/* Virtio ring implementation.
38c2ecf20Sopenharmony_ci *
48c2ecf20Sopenharmony_ci *  Copyright 2007 Rusty Russell IBM Corporation
58c2ecf20Sopenharmony_ci */
68c2ecf20Sopenharmony_ci#include <linux/virtio.h>
78c2ecf20Sopenharmony_ci#include <linux/virtio_ring.h>
88c2ecf20Sopenharmony_ci#include <linux/virtio_config.h>
98c2ecf20Sopenharmony_ci#include <linux/device.h>
108c2ecf20Sopenharmony_ci#include <linux/slab.h>
118c2ecf20Sopenharmony_ci#include <linux/module.h>
128c2ecf20Sopenharmony_ci#include <linux/hrtimer.h>
138c2ecf20Sopenharmony_ci#include <linux/dma-mapping.h>
148c2ecf20Sopenharmony_ci#include <xen/xen.h>
158c2ecf20Sopenharmony_ci
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
/* Leave the "in use" section entered by START_USE; in_use must be set. */
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
/* Record the time of this add; warn if >100ms passed since the last one. */
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
/* Warn if >100ms have elapsed since the last recorded add (if any). */
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				      (_vq)->last_add_time)) > 100); \
		}						\
	} while (0)
/* Forget the recorded add time so the next check starts fresh. */
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
/* Production build: report the error and mark the ring broken, don't crash. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
/* The remaining debug hooks compile away entirely in non-DEBUG builds. */
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif
678c2ecf20Sopenharmony_ci
/* Per-descriptor driver-side state for the split ring layout. */
struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};
728c2ecf20Sopenharmony_ci
/* Per-descriptor driver-side state for the packed ring layout. */
struct vring_desc_state_packed {
	void *data;			/* Data for callback. */
	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
	u16 num;			/* Descriptor list length. */
	u16 next;			/* The next desc state in a list. */
	u16 last;			/* The last desc state in a list. */
};
808c2ecf20Sopenharmony_ci
/*
 * Shadow copy of DMA mapping info for a packed-ring descriptor, kept so
 * the driver can unmap without re-reading device-visible ring memory.
 */
struct vring_desc_extra_packed {
	dma_addr_t addr;		/* Buffer DMA addr. */
	u32 len;			/* Buffer length. */
	u16 flags;			/* Descriptor flags. */
};
868c2ecf20Sopenharmony_ci
/*
 * Driver-private virtqueue state wrapping the public struct virtqueue.
 * The split/packed union holds the format-specific ring layout and
 * bookkeeping; which member is valid is selected by packed_ring.
 */
struct vring_virtqueue {
	struct virtqueue vq;

	/* Is this a packed ring? */
	bool packed_ring;

	/* Is DMA API used? */
	bool use_dma_api;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Format-specific state; exactly one member is active per queue. */
	union {
		/* Available for split ring */
		struct {
			/* Actual memory layout for this queue. */
			struct vring vring;

			/* Last written value to avail->flags */
			u16 avail_flags_shadow;

			/*
			 * Last written value to avail->idx in
			 * guest byte order.
			 */
			u16 avail_idx_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_split *desc_state;

			/* DMA address and size information */
			dma_addr_t queue_dma_addr;
			size_t queue_size_in_bytes;
		} split;

		/* Available for packed ring */
		struct {
			/* Actual memory layout for this queue. */
			struct {
				unsigned int num;
				struct vring_packed_desc *desc;
				struct vring_packed_desc_event *driver;
				struct vring_packed_desc_event *device;
			} vring;

			/* Driver ring wrap counter. */
			bool avail_wrap_counter;

			/* Device ring wrap counter. */
			bool used_wrap_counter;

			/* Avail used flags. */
			u16 avail_used_flags;

			/* Index of the next avail descriptor. */
			u16 next_avail_idx;

			/*
			 * Last written value to driver->flags in
			 * guest byte order.
			 */
			u16 event_flags_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_packed *desc_state;
			struct vring_desc_extra_packed *desc_extra;

			/* DMA address and size information */
			dma_addr_t ring_dma_addr;
			dma_addr_t driver_event_dma_addr;
			dma_addr_t device_event_dma_addr;
			size_t ring_size_in_bytes;
			size_t event_size_in_bytes;
		} packed;
	};

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};
1958c2ecf20Sopenharmony_ci
1968c2ecf20Sopenharmony_ci
/*
 * Helpers.
 */

/* Convert a public struct virtqueue pointer to its containing vring_virtqueue. */
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
2028c2ecf20Sopenharmony_ci
2038c2ecf20Sopenharmony_cistatic inline bool virtqueue_use_indirect(struct virtqueue *_vq,
2048c2ecf20Sopenharmony_ci					  unsigned int total_sg)
2058c2ecf20Sopenharmony_ci{
2068c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
2078c2ecf20Sopenharmony_ci
2088c2ecf20Sopenharmony_ci	/*
2098c2ecf20Sopenharmony_ci	 * If the host supports indirect descriptor tables, and we have multiple
2108c2ecf20Sopenharmony_ci	 * buffers, then go indirect. FIXME: tune this threshold
2118c2ecf20Sopenharmony_ci	 */
2128c2ecf20Sopenharmony_ci	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
2138c2ecf20Sopenharmony_ci}
2148c2ecf20Sopenharmony_ci
2158c2ecf20Sopenharmony_ci/*
2168c2ecf20Sopenharmony_ci * Modern virtio devices have feature bits to specify whether they need a
2178c2ecf20Sopenharmony_ci * quirk and bypass the IOMMU. If not there, just use the DMA API.
2188c2ecf20Sopenharmony_ci *
2198c2ecf20Sopenharmony_ci * If there, the interaction between virtio and DMA API is messy.
2208c2ecf20Sopenharmony_ci *
2218c2ecf20Sopenharmony_ci * On most systems with virtio, physical addresses match bus addresses,
2228c2ecf20Sopenharmony_ci * and it doesn't particularly matter whether we use the DMA API.
2238c2ecf20Sopenharmony_ci *
2248c2ecf20Sopenharmony_ci * On some systems, including Xen and any system with a physical device
2258c2ecf20Sopenharmony_ci * that speaks virtio behind a physical IOMMU, we must use the DMA API
2268c2ecf20Sopenharmony_ci * for virtio DMA to work at all.
2278c2ecf20Sopenharmony_ci *
2288c2ecf20Sopenharmony_ci * On other systems, including SPARC and PPC64, virtio-pci devices are
2298c2ecf20Sopenharmony_ci * enumerated as though they are behind an IOMMU, but the virtio host
2308c2ecf20Sopenharmony_ci * ignores the IOMMU, so we must either pretend that the IOMMU isn't
2318c2ecf20Sopenharmony_ci * there or somehow map everything as the identity.
2328c2ecf20Sopenharmony_ci *
2338c2ecf20Sopenharmony_ci * For the time being, we preserve historic behavior and bypass the DMA
2348c2ecf20Sopenharmony_ci * API.
2358c2ecf20Sopenharmony_ci *
2368c2ecf20Sopenharmony_ci * TODO: install a per-device DMA ops structure that does the right thing
2378c2ecf20Sopenharmony_ci * taking into account all the above quirks, and use the DMA API
2388c2ecf20Sopenharmony_ci * unconditionally on data path.
2398c2ecf20Sopenharmony_ci */
2408c2ecf20Sopenharmony_ci
2418c2ecf20Sopenharmony_cistatic bool vring_use_dma_api(struct virtio_device *vdev)
2428c2ecf20Sopenharmony_ci{
2438c2ecf20Sopenharmony_ci	if (!virtio_has_dma_quirk(vdev))
2448c2ecf20Sopenharmony_ci		return true;
2458c2ecf20Sopenharmony_ci
2468c2ecf20Sopenharmony_ci	/* Otherwise, we are left to guess. */
2478c2ecf20Sopenharmony_ci	/*
2488c2ecf20Sopenharmony_ci	 * In theory, it's possible to have a buggy QEMU-supposed
2498c2ecf20Sopenharmony_ci	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
2508c2ecf20Sopenharmony_ci	 * such a configuration, virtio has never worked and will
2518c2ecf20Sopenharmony_ci	 * not work without an even larger kludge.  Instead, enable
2528c2ecf20Sopenharmony_ci	 * the DMA API if we're a Xen guest, which at least allows
2538c2ecf20Sopenharmony_ci	 * all of the sensible Xen configurations to work correctly.
2548c2ecf20Sopenharmony_ci	 */
2558c2ecf20Sopenharmony_ci	if (xen_domain())
2568c2ecf20Sopenharmony_ci		return true;
2578c2ecf20Sopenharmony_ci
2588c2ecf20Sopenharmony_ci	return false;
2598c2ecf20Sopenharmony_ci}
2608c2ecf20Sopenharmony_ci
2618c2ecf20Sopenharmony_cisize_t virtio_max_dma_size(struct virtio_device *vdev)
2628c2ecf20Sopenharmony_ci{
2638c2ecf20Sopenharmony_ci	size_t max_segment_size = SIZE_MAX;
2648c2ecf20Sopenharmony_ci
2658c2ecf20Sopenharmony_ci	if (vring_use_dma_api(vdev))
2668c2ecf20Sopenharmony_ci		max_segment_size = dma_max_mapping_size(vdev->dev.parent);
2678c2ecf20Sopenharmony_ci
2688c2ecf20Sopenharmony_ci	return max_segment_size;
2698c2ecf20Sopenharmony_ci}
2708c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtio_max_dma_size);
2718c2ecf20Sopenharmony_ci
/*
 * Allocate ring memory and report its bus address via *dma_handle.
 * With the DMA API the memory is a coherent allocation; without it,
 * ordinary pages are used and the physical address doubles as the
 * "DMA" address.  Returns NULL on failure.
 */
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			      dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}
3048c2ecf20Sopenharmony_ci
3058c2ecf20Sopenharmony_cistatic void vring_free_queue(struct virtio_device *vdev, size_t size,
3068c2ecf20Sopenharmony_ci			     void *queue, dma_addr_t dma_handle)
3078c2ecf20Sopenharmony_ci{
3088c2ecf20Sopenharmony_ci	if (vring_use_dma_api(vdev))
3098c2ecf20Sopenharmony_ci		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
3108c2ecf20Sopenharmony_ci	else
3118c2ecf20Sopenharmony_ci		free_pages_exact(queue, PAGE_ALIGN(size));
3128c2ecf20Sopenharmony_ci}
3138c2ecf20Sopenharmony_ci
3148c2ecf20Sopenharmony_ci/*
3158c2ecf20Sopenharmony_ci * The DMA ops on various arches are rather gnarly right now, and
3168c2ecf20Sopenharmony_ci * making all of the arch DMA ops work on the vring device itself
3178c2ecf20Sopenharmony_ci * is a mess.  For now, we use the parent device for DMA ops.
3188c2ecf20Sopenharmony_ci */
3198c2ecf20Sopenharmony_cistatic inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
3208c2ecf20Sopenharmony_ci{
3218c2ecf20Sopenharmony_ci	return vq->vq.vdev->dev.parent;
3228c2ecf20Sopenharmony_ci}
3238c2ecf20Sopenharmony_ci
3248c2ecf20Sopenharmony_ci/* Map one sg entry. */
3258c2ecf20Sopenharmony_cistatic dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
3268c2ecf20Sopenharmony_ci				   struct scatterlist *sg,
3278c2ecf20Sopenharmony_ci				   enum dma_data_direction direction)
3288c2ecf20Sopenharmony_ci{
3298c2ecf20Sopenharmony_ci	if (!vq->use_dma_api)
3308c2ecf20Sopenharmony_ci		return (dma_addr_t)sg_phys(sg);
3318c2ecf20Sopenharmony_ci
3328c2ecf20Sopenharmony_ci	/*
3338c2ecf20Sopenharmony_ci	 * We can't use dma_map_sg, because we don't use scatterlists in
3348c2ecf20Sopenharmony_ci	 * the way it expects (we don't guarantee that the scatterlist
3358c2ecf20Sopenharmony_ci	 * will exist for the lifetime of the mapping).
3368c2ecf20Sopenharmony_ci	 */
3378c2ecf20Sopenharmony_ci	return dma_map_page(vring_dma_dev(vq),
3388c2ecf20Sopenharmony_ci			    sg_page(sg), sg->offset, sg->length,
3398c2ecf20Sopenharmony_ci			    direction);
3408c2ecf20Sopenharmony_ci}
3418c2ecf20Sopenharmony_ci
3428c2ecf20Sopenharmony_cistatic dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
3438c2ecf20Sopenharmony_ci				   void *cpu_addr, size_t size,
3448c2ecf20Sopenharmony_ci				   enum dma_data_direction direction)
3458c2ecf20Sopenharmony_ci{
3468c2ecf20Sopenharmony_ci	if (!vq->use_dma_api)
3478c2ecf20Sopenharmony_ci		return (dma_addr_t)virt_to_phys(cpu_addr);
3488c2ecf20Sopenharmony_ci
3498c2ecf20Sopenharmony_ci	return dma_map_single(vring_dma_dev(vq),
3508c2ecf20Sopenharmony_ci			      cpu_addr, size, direction);
3518c2ecf20Sopenharmony_ci}
3528c2ecf20Sopenharmony_ci
3538c2ecf20Sopenharmony_cistatic int vring_mapping_error(const struct vring_virtqueue *vq,
3548c2ecf20Sopenharmony_ci			       dma_addr_t addr)
3558c2ecf20Sopenharmony_ci{
3568c2ecf20Sopenharmony_ci	if (!vq->use_dma_api)
3578c2ecf20Sopenharmony_ci		return 0;
3588c2ecf20Sopenharmony_ci
3598c2ecf20Sopenharmony_ci	return dma_mapping_error(vring_dma_dev(vq), addr);
3608c2ecf20Sopenharmony_ci}
3618c2ecf20Sopenharmony_ci
3628c2ecf20Sopenharmony_ci
3638c2ecf20Sopenharmony_ci/*
3648c2ecf20Sopenharmony_ci * Split ring specific functions - *_split().
3658c2ecf20Sopenharmony_ci */
3668c2ecf20Sopenharmony_ci
3678c2ecf20Sopenharmony_cistatic void vring_unmap_one_split(const struct vring_virtqueue *vq,
3688c2ecf20Sopenharmony_ci				  struct vring_desc *desc)
3698c2ecf20Sopenharmony_ci{
3708c2ecf20Sopenharmony_ci	u16 flags;
3718c2ecf20Sopenharmony_ci
3728c2ecf20Sopenharmony_ci	if (!vq->use_dma_api)
3738c2ecf20Sopenharmony_ci		return;
3748c2ecf20Sopenharmony_ci
3758c2ecf20Sopenharmony_ci	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
3768c2ecf20Sopenharmony_ci
3778c2ecf20Sopenharmony_ci	if (flags & VRING_DESC_F_INDIRECT) {
3788c2ecf20Sopenharmony_ci		dma_unmap_single(vring_dma_dev(vq),
3798c2ecf20Sopenharmony_ci				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
3808c2ecf20Sopenharmony_ci				 virtio32_to_cpu(vq->vq.vdev, desc->len),
3818c2ecf20Sopenharmony_ci				 (flags & VRING_DESC_F_WRITE) ?
3828c2ecf20Sopenharmony_ci				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
3838c2ecf20Sopenharmony_ci	} else {
3848c2ecf20Sopenharmony_ci		dma_unmap_page(vring_dma_dev(vq),
3858c2ecf20Sopenharmony_ci			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
3868c2ecf20Sopenharmony_ci			       virtio32_to_cpu(vq->vq.vdev, desc->len),
3878c2ecf20Sopenharmony_ci			       (flags & VRING_DESC_F_WRITE) ?
3888c2ecf20Sopenharmony_ci			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
3898c2ecf20Sopenharmony_ci	}
3908c2ecf20Sopenharmony_ci}
3918c2ecf20Sopenharmony_ci
3928c2ecf20Sopenharmony_cistatic struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
3938c2ecf20Sopenharmony_ci					       unsigned int total_sg,
3948c2ecf20Sopenharmony_ci					       gfp_t gfp)
3958c2ecf20Sopenharmony_ci{
3968c2ecf20Sopenharmony_ci	struct vring_desc *desc;
3978c2ecf20Sopenharmony_ci	unsigned int i;
3988c2ecf20Sopenharmony_ci
3998c2ecf20Sopenharmony_ci	/*
4008c2ecf20Sopenharmony_ci	 * We require lowmem mappings for the descriptors because
4018c2ecf20Sopenharmony_ci	 * otherwise virt_to_phys will give us bogus addresses in the
4028c2ecf20Sopenharmony_ci	 * virtqueue.
4038c2ecf20Sopenharmony_ci	 */
4048c2ecf20Sopenharmony_ci	gfp &= ~__GFP_HIGHMEM;
4058c2ecf20Sopenharmony_ci
4068c2ecf20Sopenharmony_ci	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
4078c2ecf20Sopenharmony_ci	if (!desc)
4088c2ecf20Sopenharmony_ci		return NULL;
4098c2ecf20Sopenharmony_ci
4108c2ecf20Sopenharmony_ci	for (i = 0; i < total_sg; i++)
4118c2ecf20Sopenharmony_ci		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
4128c2ecf20Sopenharmony_ci	return desc;
4138c2ecf20Sopenharmony_ci}
4148c2ecf20Sopenharmony_ci
/*
 * virtqueue_add_split - expose buffers to the device on a split ring
 * @_vq: the virtqueue
 * @sgs: array of out_sgs + in_sgs scatterlists (outgoing first)
 * @total_sg: total number of sg entries across all lists
 * @out_sgs: number of device-readable scatterlists
 * @in_sgs: number of device-writable scatterlists
 * @data: opaque token returned from virtqueue_get_buf (must be non-NULL)
 * @ctx: extra per-buffer context; mutually exclusive with indirect support
 * @gfp: allocation flags for the indirect table, if one is used
 *
 * Returns 0 on success, -EIO if the ring is broken, -ENOSPC if there are
 * not enough free descriptors, or -ENOMEM if a DMA mapping (or, in
 * effect, the whole add) failed.  The new buffer is published in the
 * avail ring but the device is not kicked here (except for the 16-bit
 * num_added overflow safeguard near the end).
 */
static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, prev, err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* Try an indirect table first; fall back to in-ring descriptors
	 * if allocation fails or indirect isn't worthwhile. */
	if (virtqueue_use_indirect(_vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	/* Fill device-readable entries, following the pre-chained next
	 * links (indirect table) or the ring's free list. */
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Then the device-writable entries. */
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
				VRING_DESC_F_INDIRECT);
		vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
				addr);

		vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
				total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev,
					vq->split.vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
	if (indirect)
		vq->split.desc_state[head].indir_desc = desc;
	else
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	/* Walk the chain from its start up to the entry that failed,
	 * unmapping everything that was successfully mapped. */
	err_idx = i;

	if (indirect)
		i = 0;
	else
		i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one_split(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}
5888c2ecf20Sopenharmony_ci
/*
 * virtqueue_kick_prepare_split - decide whether the device must be notified
 * @_vq: the virtqueue
 *
 * Returns true when the driver should call the transport notify hook:
 * with VIRTIO_RING_F_EVENT_IDX the decision uses the device-published
 * avail event index against the range of entries added since the last
 * kick; otherwise it is the device's VRING_USED_F_NO_NOTIFY flag.
 * Also resets num_added for the next batch.
 */
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	/* [old, new) is the window of entries published since the last kick. */
	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
6198c2ecf20Sopenharmony_ci
6208c2ecf20Sopenharmony_cistatic void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
6218c2ecf20Sopenharmony_ci			     void **ctx)
6228c2ecf20Sopenharmony_ci{
6238c2ecf20Sopenharmony_ci	unsigned int i, j;
6248c2ecf20Sopenharmony_ci	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
6258c2ecf20Sopenharmony_ci
6268c2ecf20Sopenharmony_ci	/* Clear data ptr. */
6278c2ecf20Sopenharmony_ci	vq->split.desc_state[head].data = NULL;
6288c2ecf20Sopenharmony_ci
6298c2ecf20Sopenharmony_ci	/* Put back on free list: unmap first-level descriptors and find end */
6308c2ecf20Sopenharmony_ci	i = head;
6318c2ecf20Sopenharmony_ci
6328c2ecf20Sopenharmony_ci	while (vq->split.vring.desc[i].flags & nextflag) {
6338c2ecf20Sopenharmony_ci		vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
6348c2ecf20Sopenharmony_ci		i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
6358c2ecf20Sopenharmony_ci		vq->vq.num_free++;
6368c2ecf20Sopenharmony_ci	}
6378c2ecf20Sopenharmony_ci
6388c2ecf20Sopenharmony_ci	vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
6398c2ecf20Sopenharmony_ci	vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
6408c2ecf20Sopenharmony_ci						vq->free_head);
6418c2ecf20Sopenharmony_ci	vq->free_head = head;
6428c2ecf20Sopenharmony_ci
6438c2ecf20Sopenharmony_ci	/* Plus final descriptor */
6448c2ecf20Sopenharmony_ci	vq->vq.num_free++;
6458c2ecf20Sopenharmony_ci
6468c2ecf20Sopenharmony_ci	if (vq->indirect) {
6478c2ecf20Sopenharmony_ci		struct vring_desc *indir_desc =
6488c2ecf20Sopenharmony_ci				vq->split.desc_state[head].indir_desc;
6498c2ecf20Sopenharmony_ci		u32 len;
6508c2ecf20Sopenharmony_ci
6518c2ecf20Sopenharmony_ci		/* Free the indirect table, if any, now that it's unmapped. */
6528c2ecf20Sopenharmony_ci		if (!indir_desc)
6538c2ecf20Sopenharmony_ci			return;
6548c2ecf20Sopenharmony_ci
6558c2ecf20Sopenharmony_ci		len = virtio32_to_cpu(vq->vq.vdev,
6568c2ecf20Sopenharmony_ci				vq->split.vring.desc[head].len);
6578c2ecf20Sopenharmony_ci
6588c2ecf20Sopenharmony_ci		BUG_ON(!(vq->split.vring.desc[head].flags &
6598c2ecf20Sopenharmony_ci			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
6608c2ecf20Sopenharmony_ci		BUG_ON(len == 0 || len % sizeof(struct vring_desc));
6618c2ecf20Sopenharmony_ci
6628c2ecf20Sopenharmony_ci		for (j = 0; j < len / sizeof(struct vring_desc); j++)
6638c2ecf20Sopenharmony_ci			vring_unmap_one_split(vq, &indir_desc[j]);
6648c2ecf20Sopenharmony_ci
6658c2ecf20Sopenharmony_ci		kfree(indir_desc);
6668c2ecf20Sopenharmony_ci		vq->split.desc_state[head].indir_desc = NULL;
6678c2ecf20Sopenharmony_ci	} else if (ctx) {
6688c2ecf20Sopenharmony_ci		*ctx = vq->split.desc_state[head].indir_desc;
6698c2ecf20Sopenharmony_ci	}
6708c2ecf20Sopenharmony_ci}
6718c2ecf20Sopenharmony_ci
6728c2ecf20Sopenharmony_cistatic inline bool more_used_split(const struct vring_virtqueue *vq)
6738c2ecf20Sopenharmony_ci{
6748c2ecf20Sopenharmony_ci	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
6758c2ecf20Sopenharmony_ci			vq->split.vring.used->idx);
6768c2ecf20Sopenharmony_ci}
6778c2ecf20Sopenharmony_ci
6788c2ecf20Sopenharmony_cistatic void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
6798c2ecf20Sopenharmony_ci					 unsigned int *len,
6808c2ecf20Sopenharmony_ci					 void **ctx)
6818c2ecf20Sopenharmony_ci{
6828c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
6838c2ecf20Sopenharmony_ci	void *ret;
6848c2ecf20Sopenharmony_ci	unsigned int i;
6858c2ecf20Sopenharmony_ci	u16 last_used;
6868c2ecf20Sopenharmony_ci
6878c2ecf20Sopenharmony_ci	START_USE(vq);
6888c2ecf20Sopenharmony_ci
6898c2ecf20Sopenharmony_ci	if (unlikely(vq->broken)) {
6908c2ecf20Sopenharmony_ci		END_USE(vq);
6918c2ecf20Sopenharmony_ci		return NULL;
6928c2ecf20Sopenharmony_ci	}
6938c2ecf20Sopenharmony_ci
6948c2ecf20Sopenharmony_ci	if (!more_used_split(vq)) {
6958c2ecf20Sopenharmony_ci		pr_debug("No more buffers in queue\n");
6968c2ecf20Sopenharmony_ci		END_USE(vq);
6978c2ecf20Sopenharmony_ci		return NULL;
6988c2ecf20Sopenharmony_ci	}
6998c2ecf20Sopenharmony_ci
7008c2ecf20Sopenharmony_ci	/* Only get used array entries after they have been exposed by host. */
7018c2ecf20Sopenharmony_ci	virtio_rmb(vq->weak_barriers);
7028c2ecf20Sopenharmony_ci
7038c2ecf20Sopenharmony_ci	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
7048c2ecf20Sopenharmony_ci	i = virtio32_to_cpu(_vq->vdev,
7058c2ecf20Sopenharmony_ci			vq->split.vring.used->ring[last_used].id);
7068c2ecf20Sopenharmony_ci	*len = virtio32_to_cpu(_vq->vdev,
7078c2ecf20Sopenharmony_ci			vq->split.vring.used->ring[last_used].len);
7088c2ecf20Sopenharmony_ci
7098c2ecf20Sopenharmony_ci	if (unlikely(i >= vq->split.vring.num)) {
7108c2ecf20Sopenharmony_ci		BAD_RING(vq, "id %u out of range\n", i);
7118c2ecf20Sopenharmony_ci		return NULL;
7128c2ecf20Sopenharmony_ci	}
7138c2ecf20Sopenharmony_ci	if (unlikely(!vq->split.desc_state[i].data)) {
7148c2ecf20Sopenharmony_ci		BAD_RING(vq, "id %u is not a head!\n", i);
7158c2ecf20Sopenharmony_ci		return NULL;
7168c2ecf20Sopenharmony_ci	}
7178c2ecf20Sopenharmony_ci
7188c2ecf20Sopenharmony_ci	/* detach_buf_split clears data, so grab it now. */
7198c2ecf20Sopenharmony_ci	ret = vq->split.desc_state[i].data;
7208c2ecf20Sopenharmony_ci	detach_buf_split(vq, i, ctx);
7218c2ecf20Sopenharmony_ci	vq->last_used_idx++;
7228c2ecf20Sopenharmony_ci	/* If we expect an interrupt for the next entry, tell host
7238c2ecf20Sopenharmony_ci	 * by writing event index and flush out the write before
7248c2ecf20Sopenharmony_ci	 * the read in the next get_buf call. */
7258c2ecf20Sopenharmony_ci	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
7268c2ecf20Sopenharmony_ci		virtio_store_mb(vq->weak_barriers,
7278c2ecf20Sopenharmony_ci				&vring_used_event(&vq->split.vring),
7288c2ecf20Sopenharmony_ci				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
7298c2ecf20Sopenharmony_ci
7308c2ecf20Sopenharmony_ci	LAST_ADD_TIME_INVALID(vq);
7318c2ecf20Sopenharmony_ci
7328c2ecf20Sopenharmony_ci	END_USE(vq);
7338c2ecf20Sopenharmony_ci	return ret;
7348c2ecf20Sopenharmony_ci}
7358c2ecf20Sopenharmony_ci
7368c2ecf20Sopenharmony_cistatic void virtqueue_disable_cb_split(struct virtqueue *_vq)
7378c2ecf20Sopenharmony_ci{
7388c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
7398c2ecf20Sopenharmony_ci
7408c2ecf20Sopenharmony_ci	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
7418c2ecf20Sopenharmony_ci		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
7428c2ecf20Sopenharmony_ci		if (!vq->event)
7438c2ecf20Sopenharmony_ci			vq->split.vring.avail->flags =
7448c2ecf20Sopenharmony_ci				cpu_to_virtio16(_vq->vdev,
7458c2ecf20Sopenharmony_ci						vq->split.avail_flags_shadow);
7468c2ecf20Sopenharmony_ci	}
7478c2ecf20Sopenharmony_ci}
7488c2ecf20Sopenharmony_ci
7498c2ecf20Sopenharmony_cistatic unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
7508c2ecf20Sopenharmony_ci{
7518c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
7528c2ecf20Sopenharmony_ci	u16 last_used_idx;
7538c2ecf20Sopenharmony_ci
7548c2ecf20Sopenharmony_ci	START_USE(vq);
7558c2ecf20Sopenharmony_ci
7568c2ecf20Sopenharmony_ci	/* We optimistically turn back on interrupts, then check if there was
7578c2ecf20Sopenharmony_ci	 * more to do. */
7588c2ecf20Sopenharmony_ci	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
7598c2ecf20Sopenharmony_ci	 * either clear the flags bit or point the event index at the next
7608c2ecf20Sopenharmony_ci	 * entry. Always do both to keep code simple. */
7618c2ecf20Sopenharmony_ci	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
7628c2ecf20Sopenharmony_ci		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
7638c2ecf20Sopenharmony_ci		if (!vq->event)
7648c2ecf20Sopenharmony_ci			vq->split.vring.avail->flags =
7658c2ecf20Sopenharmony_ci				cpu_to_virtio16(_vq->vdev,
7668c2ecf20Sopenharmony_ci						vq->split.avail_flags_shadow);
7678c2ecf20Sopenharmony_ci	}
7688c2ecf20Sopenharmony_ci	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
7698c2ecf20Sopenharmony_ci			last_used_idx = vq->last_used_idx);
7708c2ecf20Sopenharmony_ci	END_USE(vq);
7718c2ecf20Sopenharmony_ci	return last_used_idx;
7728c2ecf20Sopenharmony_ci}
7738c2ecf20Sopenharmony_ci
7748c2ecf20Sopenharmony_cistatic bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
7758c2ecf20Sopenharmony_ci{
7768c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
7778c2ecf20Sopenharmony_ci
7788c2ecf20Sopenharmony_ci	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
7798c2ecf20Sopenharmony_ci			vq->split.vring.used->idx);
7808c2ecf20Sopenharmony_ci}
7818c2ecf20Sopenharmony_ci
7828c2ecf20Sopenharmony_cistatic bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
7838c2ecf20Sopenharmony_ci{
7848c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
7858c2ecf20Sopenharmony_ci	u16 bufs;
7868c2ecf20Sopenharmony_ci
7878c2ecf20Sopenharmony_ci	START_USE(vq);
7888c2ecf20Sopenharmony_ci
7898c2ecf20Sopenharmony_ci	/* We optimistically turn back on interrupts, then check if there was
7908c2ecf20Sopenharmony_ci	 * more to do. */
7918c2ecf20Sopenharmony_ci	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
7928c2ecf20Sopenharmony_ci	 * either clear the flags bit or point the event index at the next
7938c2ecf20Sopenharmony_ci	 * entry. Always update the event index to keep code simple. */
7948c2ecf20Sopenharmony_ci	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
7958c2ecf20Sopenharmony_ci		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
7968c2ecf20Sopenharmony_ci		if (!vq->event)
7978c2ecf20Sopenharmony_ci			vq->split.vring.avail->flags =
7988c2ecf20Sopenharmony_ci				cpu_to_virtio16(_vq->vdev,
7998c2ecf20Sopenharmony_ci						vq->split.avail_flags_shadow);
8008c2ecf20Sopenharmony_ci	}
8018c2ecf20Sopenharmony_ci	/* TODO: tune this threshold */
8028c2ecf20Sopenharmony_ci	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;
8038c2ecf20Sopenharmony_ci
8048c2ecf20Sopenharmony_ci	virtio_store_mb(vq->weak_barriers,
8058c2ecf20Sopenharmony_ci			&vring_used_event(&vq->split.vring),
8068c2ecf20Sopenharmony_ci			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
8078c2ecf20Sopenharmony_ci
8088c2ecf20Sopenharmony_ci	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
8098c2ecf20Sopenharmony_ci					- vq->last_used_idx) > bufs)) {
8108c2ecf20Sopenharmony_ci		END_USE(vq);
8118c2ecf20Sopenharmony_ci		return false;
8128c2ecf20Sopenharmony_ci	}
8138c2ecf20Sopenharmony_ci
8148c2ecf20Sopenharmony_ci	END_USE(vq);
8158c2ecf20Sopenharmony_ci	return true;
8168c2ecf20Sopenharmony_ci}
8178c2ecf20Sopenharmony_ci
8188c2ecf20Sopenharmony_cistatic void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
8198c2ecf20Sopenharmony_ci{
8208c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
8218c2ecf20Sopenharmony_ci	unsigned int i;
8228c2ecf20Sopenharmony_ci	void *buf;
8238c2ecf20Sopenharmony_ci
8248c2ecf20Sopenharmony_ci	START_USE(vq);
8258c2ecf20Sopenharmony_ci
8268c2ecf20Sopenharmony_ci	for (i = 0; i < vq->split.vring.num; i++) {
8278c2ecf20Sopenharmony_ci		if (!vq->split.desc_state[i].data)
8288c2ecf20Sopenharmony_ci			continue;
8298c2ecf20Sopenharmony_ci		/* detach_buf_split clears data, so grab it now. */
8308c2ecf20Sopenharmony_ci		buf = vq->split.desc_state[i].data;
8318c2ecf20Sopenharmony_ci		detach_buf_split(vq, i, NULL);
8328c2ecf20Sopenharmony_ci		vq->split.avail_idx_shadow--;
8338c2ecf20Sopenharmony_ci		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
8348c2ecf20Sopenharmony_ci				vq->split.avail_idx_shadow);
8358c2ecf20Sopenharmony_ci		END_USE(vq);
8368c2ecf20Sopenharmony_ci		return buf;
8378c2ecf20Sopenharmony_ci	}
8388c2ecf20Sopenharmony_ci	/* That should have freed everything. */
8398c2ecf20Sopenharmony_ci	BUG_ON(vq->vq.num_free != vq->split.vring.num);
8408c2ecf20Sopenharmony_ci
8418c2ecf20Sopenharmony_ci	END_USE(vq);
8428c2ecf20Sopenharmony_ci	return NULL;
8438c2ecf20Sopenharmony_ci}
8448c2ecf20Sopenharmony_ci
8458c2ecf20Sopenharmony_cistatic struct virtqueue *vring_create_virtqueue_split(
8468c2ecf20Sopenharmony_ci	unsigned int index,
8478c2ecf20Sopenharmony_ci	unsigned int num,
8488c2ecf20Sopenharmony_ci	unsigned int vring_align,
8498c2ecf20Sopenharmony_ci	struct virtio_device *vdev,
8508c2ecf20Sopenharmony_ci	bool weak_barriers,
8518c2ecf20Sopenharmony_ci	bool may_reduce_num,
8528c2ecf20Sopenharmony_ci	bool context,
8538c2ecf20Sopenharmony_ci	bool (*notify)(struct virtqueue *),
8548c2ecf20Sopenharmony_ci	void (*callback)(struct virtqueue *),
8558c2ecf20Sopenharmony_ci	const char *name)
8568c2ecf20Sopenharmony_ci{
8578c2ecf20Sopenharmony_ci	struct virtqueue *vq;
8588c2ecf20Sopenharmony_ci	void *queue = NULL;
8598c2ecf20Sopenharmony_ci	dma_addr_t dma_addr;
8608c2ecf20Sopenharmony_ci	size_t queue_size_in_bytes;
8618c2ecf20Sopenharmony_ci	struct vring vring;
8628c2ecf20Sopenharmony_ci
8638c2ecf20Sopenharmony_ci	/* We assume num is a power of 2. */
8648c2ecf20Sopenharmony_ci	if (num & (num - 1)) {
8658c2ecf20Sopenharmony_ci		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
8668c2ecf20Sopenharmony_ci		return NULL;
8678c2ecf20Sopenharmony_ci	}
8688c2ecf20Sopenharmony_ci
8698c2ecf20Sopenharmony_ci	/* TODO: allocate each queue chunk individually */
8708c2ecf20Sopenharmony_ci	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
8718c2ecf20Sopenharmony_ci		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
8728c2ecf20Sopenharmony_ci					  &dma_addr,
8738c2ecf20Sopenharmony_ci					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
8748c2ecf20Sopenharmony_ci		if (queue)
8758c2ecf20Sopenharmony_ci			break;
8768c2ecf20Sopenharmony_ci		if (!may_reduce_num)
8778c2ecf20Sopenharmony_ci			return NULL;
8788c2ecf20Sopenharmony_ci	}
8798c2ecf20Sopenharmony_ci
8808c2ecf20Sopenharmony_ci	if (!num)
8818c2ecf20Sopenharmony_ci		return NULL;
8828c2ecf20Sopenharmony_ci
8838c2ecf20Sopenharmony_ci	if (!queue) {
8848c2ecf20Sopenharmony_ci		/* Try to get a single page. You are my only hope! */
8858c2ecf20Sopenharmony_ci		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
8868c2ecf20Sopenharmony_ci					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
8878c2ecf20Sopenharmony_ci	}
8888c2ecf20Sopenharmony_ci	if (!queue)
8898c2ecf20Sopenharmony_ci		return NULL;
8908c2ecf20Sopenharmony_ci
8918c2ecf20Sopenharmony_ci	queue_size_in_bytes = vring_size(num, vring_align);
8928c2ecf20Sopenharmony_ci	vring_init(&vring, num, queue, vring_align);
8938c2ecf20Sopenharmony_ci
8948c2ecf20Sopenharmony_ci	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
8958c2ecf20Sopenharmony_ci				   notify, callback, name);
8968c2ecf20Sopenharmony_ci	if (!vq) {
8978c2ecf20Sopenharmony_ci		vring_free_queue(vdev, queue_size_in_bytes, queue,
8988c2ecf20Sopenharmony_ci				 dma_addr);
8998c2ecf20Sopenharmony_ci		return NULL;
9008c2ecf20Sopenharmony_ci	}
9018c2ecf20Sopenharmony_ci
9028c2ecf20Sopenharmony_ci	to_vvq(vq)->split.queue_dma_addr = dma_addr;
9038c2ecf20Sopenharmony_ci	to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
9048c2ecf20Sopenharmony_ci	to_vvq(vq)->we_own_ring = true;
9058c2ecf20Sopenharmony_ci
9068c2ecf20Sopenharmony_ci	return vq;
9078c2ecf20Sopenharmony_ci}
9088c2ecf20Sopenharmony_ci
9098c2ecf20Sopenharmony_ci
9108c2ecf20Sopenharmony_ci/*
9118c2ecf20Sopenharmony_ci * Packed ring specific functions - *_packed().
9128c2ecf20Sopenharmony_ci */
9138c2ecf20Sopenharmony_ci
9148c2ecf20Sopenharmony_cistatic void vring_unmap_state_packed(const struct vring_virtqueue *vq,
9158c2ecf20Sopenharmony_ci				     struct vring_desc_extra_packed *state)
9168c2ecf20Sopenharmony_ci{
9178c2ecf20Sopenharmony_ci	u16 flags;
9188c2ecf20Sopenharmony_ci
9198c2ecf20Sopenharmony_ci	if (!vq->use_dma_api)
9208c2ecf20Sopenharmony_ci		return;
9218c2ecf20Sopenharmony_ci
9228c2ecf20Sopenharmony_ci	flags = state->flags;
9238c2ecf20Sopenharmony_ci
9248c2ecf20Sopenharmony_ci	if (flags & VRING_DESC_F_INDIRECT) {
9258c2ecf20Sopenharmony_ci		dma_unmap_single(vring_dma_dev(vq),
9268c2ecf20Sopenharmony_ci				 state->addr, state->len,
9278c2ecf20Sopenharmony_ci				 (flags & VRING_DESC_F_WRITE) ?
9288c2ecf20Sopenharmony_ci				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
9298c2ecf20Sopenharmony_ci	} else {
9308c2ecf20Sopenharmony_ci		dma_unmap_page(vring_dma_dev(vq),
9318c2ecf20Sopenharmony_ci			       state->addr, state->len,
9328c2ecf20Sopenharmony_ci			       (flags & VRING_DESC_F_WRITE) ?
9338c2ecf20Sopenharmony_ci			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
9348c2ecf20Sopenharmony_ci	}
9358c2ecf20Sopenharmony_ci}
9368c2ecf20Sopenharmony_ci
9378c2ecf20Sopenharmony_cistatic void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
9388c2ecf20Sopenharmony_ci				   struct vring_packed_desc *desc)
9398c2ecf20Sopenharmony_ci{
9408c2ecf20Sopenharmony_ci	u16 flags;
9418c2ecf20Sopenharmony_ci
9428c2ecf20Sopenharmony_ci	if (!vq->use_dma_api)
9438c2ecf20Sopenharmony_ci		return;
9448c2ecf20Sopenharmony_ci
9458c2ecf20Sopenharmony_ci	flags = le16_to_cpu(desc->flags);
9468c2ecf20Sopenharmony_ci
9478c2ecf20Sopenharmony_ci	if (flags & VRING_DESC_F_INDIRECT) {
9488c2ecf20Sopenharmony_ci		dma_unmap_single(vring_dma_dev(vq),
9498c2ecf20Sopenharmony_ci				 le64_to_cpu(desc->addr),
9508c2ecf20Sopenharmony_ci				 le32_to_cpu(desc->len),
9518c2ecf20Sopenharmony_ci				 (flags & VRING_DESC_F_WRITE) ?
9528c2ecf20Sopenharmony_ci				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
9538c2ecf20Sopenharmony_ci	} else {
9548c2ecf20Sopenharmony_ci		dma_unmap_page(vring_dma_dev(vq),
9558c2ecf20Sopenharmony_ci			       le64_to_cpu(desc->addr),
9568c2ecf20Sopenharmony_ci			       le32_to_cpu(desc->len),
9578c2ecf20Sopenharmony_ci			       (flags & VRING_DESC_F_WRITE) ?
9588c2ecf20Sopenharmony_ci			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
9598c2ecf20Sopenharmony_ci	}
9608c2ecf20Sopenharmony_ci}
9618c2ecf20Sopenharmony_ci
9628c2ecf20Sopenharmony_cistatic struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
9638c2ecf20Sopenharmony_ci						       gfp_t gfp)
9648c2ecf20Sopenharmony_ci{
9658c2ecf20Sopenharmony_ci	struct vring_packed_desc *desc;
9668c2ecf20Sopenharmony_ci
9678c2ecf20Sopenharmony_ci	/*
9688c2ecf20Sopenharmony_ci	 * We require lowmem mappings for the descriptors because
9698c2ecf20Sopenharmony_ci	 * otherwise virt_to_phys will give us bogus addresses in the
9708c2ecf20Sopenharmony_ci	 * virtqueue.
9718c2ecf20Sopenharmony_ci	 */
9728c2ecf20Sopenharmony_ci	gfp &= ~__GFP_HIGHMEM;
9738c2ecf20Sopenharmony_ci
9748c2ecf20Sopenharmony_ci	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
9758c2ecf20Sopenharmony_ci
9768c2ecf20Sopenharmony_ci	return desc;
9778c2ecf20Sopenharmony_ci}
9788c2ecf20Sopenharmony_ci
9798c2ecf20Sopenharmony_cistatic int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
9808c2ecf20Sopenharmony_ci				       struct scatterlist *sgs[],
9818c2ecf20Sopenharmony_ci				       unsigned int total_sg,
9828c2ecf20Sopenharmony_ci				       unsigned int out_sgs,
9838c2ecf20Sopenharmony_ci				       unsigned int in_sgs,
9848c2ecf20Sopenharmony_ci				       void *data,
9858c2ecf20Sopenharmony_ci				       gfp_t gfp)
9868c2ecf20Sopenharmony_ci{
9878c2ecf20Sopenharmony_ci	struct vring_packed_desc *desc;
9888c2ecf20Sopenharmony_ci	struct scatterlist *sg;
9898c2ecf20Sopenharmony_ci	unsigned int i, n, err_idx;
9908c2ecf20Sopenharmony_ci	u16 head, id;
9918c2ecf20Sopenharmony_ci	dma_addr_t addr;
9928c2ecf20Sopenharmony_ci
9938c2ecf20Sopenharmony_ci	head = vq->packed.next_avail_idx;
9948c2ecf20Sopenharmony_ci	desc = alloc_indirect_packed(total_sg, gfp);
9958c2ecf20Sopenharmony_ci	if (!desc)
9968c2ecf20Sopenharmony_ci		return -ENOMEM;
9978c2ecf20Sopenharmony_ci
9988c2ecf20Sopenharmony_ci	if (unlikely(vq->vq.num_free < 1)) {
9998c2ecf20Sopenharmony_ci		pr_debug("Can't add buf len 1 - avail = 0\n");
10008c2ecf20Sopenharmony_ci		kfree(desc);
10018c2ecf20Sopenharmony_ci		END_USE(vq);
10028c2ecf20Sopenharmony_ci		return -ENOSPC;
10038c2ecf20Sopenharmony_ci	}
10048c2ecf20Sopenharmony_ci
10058c2ecf20Sopenharmony_ci	i = 0;
10068c2ecf20Sopenharmony_ci	id = vq->free_head;
10078c2ecf20Sopenharmony_ci	BUG_ON(id == vq->packed.vring.num);
10088c2ecf20Sopenharmony_ci
10098c2ecf20Sopenharmony_ci	for (n = 0; n < out_sgs + in_sgs; n++) {
10108c2ecf20Sopenharmony_ci		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
10118c2ecf20Sopenharmony_ci			addr = vring_map_one_sg(vq, sg, n < out_sgs ?
10128c2ecf20Sopenharmony_ci					DMA_TO_DEVICE : DMA_FROM_DEVICE);
10138c2ecf20Sopenharmony_ci			if (vring_mapping_error(vq, addr))
10148c2ecf20Sopenharmony_ci				goto unmap_release;
10158c2ecf20Sopenharmony_ci
10168c2ecf20Sopenharmony_ci			desc[i].flags = cpu_to_le16(n < out_sgs ?
10178c2ecf20Sopenharmony_ci						0 : VRING_DESC_F_WRITE);
10188c2ecf20Sopenharmony_ci			desc[i].addr = cpu_to_le64(addr);
10198c2ecf20Sopenharmony_ci			desc[i].len = cpu_to_le32(sg->length);
10208c2ecf20Sopenharmony_ci			i++;
10218c2ecf20Sopenharmony_ci		}
10228c2ecf20Sopenharmony_ci	}
10238c2ecf20Sopenharmony_ci
10248c2ecf20Sopenharmony_ci	/* Now that the indirect table is filled in, map it. */
10258c2ecf20Sopenharmony_ci	addr = vring_map_single(vq, desc,
10268c2ecf20Sopenharmony_ci			total_sg * sizeof(struct vring_packed_desc),
10278c2ecf20Sopenharmony_ci			DMA_TO_DEVICE);
10288c2ecf20Sopenharmony_ci	if (vring_mapping_error(vq, addr))
10298c2ecf20Sopenharmony_ci		goto unmap_release;
10308c2ecf20Sopenharmony_ci
10318c2ecf20Sopenharmony_ci	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
10328c2ecf20Sopenharmony_ci	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
10338c2ecf20Sopenharmony_ci				sizeof(struct vring_packed_desc));
10348c2ecf20Sopenharmony_ci	vq->packed.vring.desc[head].id = cpu_to_le16(id);
10358c2ecf20Sopenharmony_ci
10368c2ecf20Sopenharmony_ci	if (vq->use_dma_api) {
10378c2ecf20Sopenharmony_ci		vq->packed.desc_extra[id].addr = addr;
10388c2ecf20Sopenharmony_ci		vq->packed.desc_extra[id].len = total_sg *
10398c2ecf20Sopenharmony_ci				sizeof(struct vring_packed_desc);
10408c2ecf20Sopenharmony_ci		vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
10418c2ecf20Sopenharmony_ci						  vq->packed.avail_used_flags;
10428c2ecf20Sopenharmony_ci	}
10438c2ecf20Sopenharmony_ci
10448c2ecf20Sopenharmony_ci	/*
10458c2ecf20Sopenharmony_ci	 * A driver MUST NOT make the first descriptor in the list
10468c2ecf20Sopenharmony_ci	 * available before all subsequent descriptors comprising
10478c2ecf20Sopenharmony_ci	 * the list are made available.
10488c2ecf20Sopenharmony_ci	 */
10498c2ecf20Sopenharmony_ci	virtio_wmb(vq->weak_barriers);
10508c2ecf20Sopenharmony_ci	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
10518c2ecf20Sopenharmony_ci						vq->packed.avail_used_flags);
10528c2ecf20Sopenharmony_ci
10538c2ecf20Sopenharmony_ci	/* We're using some buffers from the free list. */
10548c2ecf20Sopenharmony_ci	vq->vq.num_free -= 1;
10558c2ecf20Sopenharmony_ci
10568c2ecf20Sopenharmony_ci	/* Update free pointer */
10578c2ecf20Sopenharmony_ci	n = head + 1;
10588c2ecf20Sopenharmony_ci	if (n >= vq->packed.vring.num) {
10598c2ecf20Sopenharmony_ci		n = 0;
10608c2ecf20Sopenharmony_ci		vq->packed.avail_wrap_counter ^= 1;
10618c2ecf20Sopenharmony_ci		vq->packed.avail_used_flags ^=
10628c2ecf20Sopenharmony_ci				1 << VRING_PACKED_DESC_F_AVAIL |
10638c2ecf20Sopenharmony_ci				1 << VRING_PACKED_DESC_F_USED;
10648c2ecf20Sopenharmony_ci	}
10658c2ecf20Sopenharmony_ci	vq->packed.next_avail_idx = n;
10668c2ecf20Sopenharmony_ci	vq->free_head = vq->packed.desc_state[id].next;
10678c2ecf20Sopenharmony_ci
10688c2ecf20Sopenharmony_ci	/* Store token and indirect buffer state. */
10698c2ecf20Sopenharmony_ci	vq->packed.desc_state[id].num = 1;
10708c2ecf20Sopenharmony_ci	vq->packed.desc_state[id].data = data;
10718c2ecf20Sopenharmony_ci	vq->packed.desc_state[id].indir_desc = desc;
10728c2ecf20Sopenharmony_ci	vq->packed.desc_state[id].last = id;
10738c2ecf20Sopenharmony_ci
10748c2ecf20Sopenharmony_ci	vq->num_added += 1;
10758c2ecf20Sopenharmony_ci
10768c2ecf20Sopenharmony_ci	pr_debug("Added buffer head %i to %p\n", head, vq);
10778c2ecf20Sopenharmony_ci	END_USE(vq);
10788c2ecf20Sopenharmony_ci
10798c2ecf20Sopenharmony_ci	return 0;
10808c2ecf20Sopenharmony_ci
10818c2ecf20Sopenharmony_ciunmap_release:
10828c2ecf20Sopenharmony_ci	err_idx = i;
10838c2ecf20Sopenharmony_ci
10848c2ecf20Sopenharmony_ci	for (i = 0; i < err_idx; i++)
10858c2ecf20Sopenharmony_ci		vring_unmap_desc_packed(vq, &desc[i]);
10868c2ecf20Sopenharmony_ci
10878c2ecf20Sopenharmony_ci	kfree(desc);
10888c2ecf20Sopenharmony_ci
10898c2ecf20Sopenharmony_ci	END_USE(vq);
10908c2ecf20Sopenharmony_ci	return -ENOMEM;
10918c2ecf20Sopenharmony_ci}
10928c2ecf20Sopenharmony_ci
10938c2ecf20Sopenharmony_cistatic inline int virtqueue_add_packed(struct virtqueue *_vq,
10948c2ecf20Sopenharmony_ci				       struct scatterlist *sgs[],
10958c2ecf20Sopenharmony_ci				       unsigned int total_sg,
10968c2ecf20Sopenharmony_ci				       unsigned int out_sgs,
10978c2ecf20Sopenharmony_ci				       unsigned int in_sgs,
10988c2ecf20Sopenharmony_ci				       void *data,
10998c2ecf20Sopenharmony_ci				       void *ctx,
11008c2ecf20Sopenharmony_ci				       gfp_t gfp)
11018c2ecf20Sopenharmony_ci{
11028c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
11038c2ecf20Sopenharmony_ci	struct vring_packed_desc *desc;
11048c2ecf20Sopenharmony_ci	struct scatterlist *sg;
11058c2ecf20Sopenharmony_ci	unsigned int i, n, c, descs_used, err_idx;
11068c2ecf20Sopenharmony_ci	__le16 head_flags, flags;
11078c2ecf20Sopenharmony_ci	u16 head, id, prev, curr, avail_used_flags;
11088c2ecf20Sopenharmony_ci	int err;
11098c2ecf20Sopenharmony_ci
11108c2ecf20Sopenharmony_ci	START_USE(vq);
11118c2ecf20Sopenharmony_ci
11128c2ecf20Sopenharmony_ci	BUG_ON(data == NULL);
11138c2ecf20Sopenharmony_ci	BUG_ON(ctx && vq->indirect);
11148c2ecf20Sopenharmony_ci
11158c2ecf20Sopenharmony_ci	if (unlikely(vq->broken)) {
11168c2ecf20Sopenharmony_ci		END_USE(vq);
11178c2ecf20Sopenharmony_ci		return -EIO;
11188c2ecf20Sopenharmony_ci	}
11198c2ecf20Sopenharmony_ci
11208c2ecf20Sopenharmony_ci	LAST_ADD_TIME_UPDATE(vq);
11218c2ecf20Sopenharmony_ci
11228c2ecf20Sopenharmony_ci	BUG_ON(total_sg == 0);
11238c2ecf20Sopenharmony_ci
11248c2ecf20Sopenharmony_ci	if (virtqueue_use_indirect(_vq, total_sg)) {
11258c2ecf20Sopenharmony_ci		err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
11268c2ecf20Sopenharmony_ci						    in_sgs, data, gfp);
11278c2ecf20Sopenharmony_ci		if (err != -ENOMEM) {
11288c2ecf20Sopenharmony_ci			END_USE(vq);
11298c2ecf20Sopenharmony_ci			return err;
11308c2ecf20Sopenharmony_ci		}
11318c2ecf20Sopenharmony_ci
11328c2ecf20Sopenharmony_ci		/* fall back on direct */
11338c2ecf20Sopenharmony_ci	}
11348c2ecf20Sopenharmony_ci
11358c2ecf20Sopenharmony_ci	head = vq->packed.next_avail_idx;
11368c2ecf20Sopenharmony_ci	avail_used_flags = vq->packed.avail_used_flags;
11378c2ecf20Sopenharmony_ci
11388c2ecf20Sopenharmony_ci	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
11398c2ecf20Sopenharmony_ci
11408c2ecf20Sopenharmony_ci	desc = vq->packed.vring.desc;
11418c2ecf20Sopenharmony_ci	i = head;
11428c2ecf20Sopenharmony_ci	descs_used = total_sg;
11438c2ecf20Sopenharmony_ci
11448c2ecf20Sopenharmony_ci	if (unlikely(vq->vq.num_free < descs_used)) {
11458c2ecf20Sopenharmony_ci		pr_debug("Can't add buf len %i - avail = %i\n",
11468c2ecf20Sopenharmony_ci			 descs_used, vq->vq.num_free);
11478c2ecf20Sopenharmony_ci		END_USE(vq);
11488c2ecf20Sopenharmony_ci		return -ENOSPC;
11498c2ecf20Sopenharmony_ci	}
11508c2ecf20Sopenharmony_ci
11518c2ecf20Sopenharmony_ci	id = vq->free_head;
11528c2ecf20Sopenharmony_ci	BUG_ON(id == vq->packed.vring.num);
11538c2ecf20Sopenharmony_ci
11548c2ecf20Sopenharmony_ci	curr = id;
11558c2ecf20Sopenharmony_ci	c = 0;
11568c2ecf20Sopenharmony_ci	for (n = 0; n < out_sgs + in_sgs; n++) {
11578c2ecf20Sopenharmony_ci		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
11588c2ecf20Sopenharmony_ci			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
11598c2ecf20Sopenharmony_ci					DMA_TO_DEVICE : DMA_FROM_DEVICE);
11608c2ecf20Sopenharmony_ci			if (vring_mapping_error(vq, addr))
11618c2ecf20Sopenharmony_ci				goto unmap_release;
11628c2ecf20Sopenharmony_ci
11638c2ecf20Sopenharmony_ci			flags = cpu_to_le16(vq->packed.avail_used_flags |
11648c2ecf20Sopenharmony_ci				    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
11658c2ecf20Sopenharmony_ci				    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
11668c2ecf20Sopenharmony_ci			if (i == head)
11678c2ecf20Sopenharmony_ci				head_flags = flags;
11688c2ecf20Sopenharmony_ci			else
11698c2ecf20Sopenharmony_ci				desc[i].flags = flags;
11708c2ecf20Sopenharmony_ci
11718c2ecf20Sopenharmony_ci			desc[i].addr = cpu_to_le64(addr);
11728c2ecf20Sopenharmony_ci			desc[i].len = cpu_to_le32(sg->length);
11738c2ecf20Sopenharmony_ci			desc[i].id = cpu_to_le16(id);
11748c2ecf20Sopenharmony_ci
11758c2ecf20Sopenharmony_ci			if (unlikely(vq->use_dma_api)) {
11768c2ecf20Sopenharmony_ci				vq->packed.desc_extra[curr].addr = addr;
11778c2ecf20Sopenharmony_ci				vq->packed.desc_extra[curr].len = sg->length;
11788c2ecf20Sopenharmony_ci				vq->packed.desc_extra[curr].flags =
11798c2ecf20Sopenharmony_ci					le16_to_cpu(flags);
11808c2ecf20Sopenharmony_ci			}
11818c2ecf20Sopenharmony_ci			prev = curr;
11828c2ecf20Sopenharmony_ci			curr = vq->packed.desc_state[curr].next;
11838c2ecf20Sopenharmony_ci
11848c2ecf20Sopenharmony_ci			if ((unlikely(++i >= vq->packed.vring.num))) {
11858c2ecf20Sopenharmony_ci				i = 0;
11868c2ecf20Sopenharmony_ci				vq->packed.avail_used_flags ^=
11878c2ecf20Sopenharmony_ci					1 << VRING_PACKED_DESC_F_AVAIL |
11888c2ecf20Sopenharmony_ci					1 << VRING_PACKED_DESC_F_USED;
11898c2ecf20Sopenharmony_ci			}
11908c2ecf20Sopenharmony_ci		}
11918c2ecf20Sopenharmony_ci	}
11928c2ecf20Sopenharmony_ci
11938c2ecf20Sopenharmony_ci	if (i <= head)
11948c2ecf20Sopenharmony_ci		vq->packed.avail_wrap_counter ^= 1;
11958c2ecf20Sopenharmony_ci
11968c2ecf20Sopenharmony_ci	/* We're using some buffers from the free list. */
11978c2ecf20Sopenharmony_ci	vq->vq.num_free -= descs_used;
11988c2ecf20Sopenharmony_ci
11998c2ecf20Sopenharmony_ci	/* Update free pointer */
12008c2ecf20Sopenharmony_ci	vq->packed.next_avail_idx = i;
12018c2ecf20Sopenharmony_ci	vq->free_head = curr;
12028c2ecf20Sopenharmony_ci
12038c2ecf20Sopenharmony_ci	/* Store token. */
12048c2ecf20Sopenharmony_ci	vq->packed.desc_state[id].num = descs_used;
12058c2ecf20Sopenharmony_ci	vq->packed.desc_state[id].data = data;
12068c2ecf20Sopenharmony_ci	vq->packed.desc_state[id].indir_desc = ctx;
12078c2ecf20Sopenharmony_ci	vq->packed.desc_state[id].last = prev;
12088c2ecf20Sopenharmony_ci
12098c2ecf20Sopenharmony_ci	/*
12108c2ecf20Sopenharmony_ci	 * A driver MUST NOT make the first descriptor in the list
12118c2ecf20Sopenharmony_ci	 * available before all subsequent descriptors comprising
12128c2ecf20Sopenharmony_ci	 * the list are made available.
12138c2ecf20Sopenharmony_ci	 */
12148c2ecf20Sopenharmony_ci	virtio_wmb(vq->weak_barriers);
12158c2ecf20Sopenharmony_ci	vq->packed.vring.desc[head].flags = head_flags;
12168c2ecf20Sopenharmony_ci	vq->num_added += descs_used;
12178c2ecf20Sopenharmony_ci
12188c2ecf20Sopenharmony_ci	pr_debug("Added buffer head %i to %p\n", head, vq);
12198c2ecf20Sopenharmony_ci	END_USE(vq);
12208c2ecf20Sopenharmony_ci
12218c2ecf20Sopenharmony_ci	return 0;
12228c2ecf20Sopenharmony_ci
12238c2ecf20Sopenharmony_ciunmap_release:
12248c2ecf20Sopenharmony_ci	err_idx = i;
12258c2ecf20Sopenharmony_ci	i = head;
12268c2ecf20Sopenharmony_ci
12278c2ecf20Sopenharmony_ci	vq->packed.avail_used_flags = avail_used_flags;
12288c2ecf20Sopenharmony_ci
12298c2ecf20Sopenharmony_ci	for (n = 0; n < total_sg; n++) {
12308c2ecf20Sopenharmony_ci		if (i == err_idx)
12318c2ecf20Sopenharmony_ci			break;
12328c2ecf20Sopenharmony_ci		vring_unmap_desc_packed(vq, &desc[i]);
12338c2ecf20Sopenharmony_ci		i++;
12348c2ecf20Sopenharmony_ci		if (i >= vq->packed.vring.num)
12358c2ecf20Sopenharmony_ci			i = 0;
12368c2ecf20Sopenharmony_ci	}
12378c2ecf20Sopenharmony_ci
12388c2ecf20Sopenharmony_ci	END_USE(vq);
12398c2ecf20Sopenharmony_ci	return -EIO;
12408c2ecf20Sopenharmony_ci}
12418c2ecf20Sopenharmony_ci
/*
 * Decide whether the device needs to be notified (kicked) after buffers
 * were added to a packed virtqueue.
 *
 * Returns true if the driver should call virtqueue_notify().  Serialized
 * against other virtqueue operations by the caller.
 */
static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old, off_wrap, flags, wrap_counter, event_idx;
	bool needs_kick;
	/*
	 * Union lets us read the device event suppression structure
	 * (off_wrap + flags, two adjacent __le16 fields) as one 32-bit
	 * load, so both values come from a single consistent snapshot.
	 */
	union {
		struct {
			__le16 off_wrap;
			__le16 flags;
		};
		u32 u32;
	} snapshot;

	START_USE(vq);

	/*
	 * We need to expose the new flags value before checking notification
	 * suppressions.
	 */
	virtio_mb(vq->weak_barriers);

	/* Range of avail indices published since the last kick: (old, new]. */
	old = vq->packed.next_avail_idx - vq->num_added;
	new = vq->packed.next_avail_idx;
	vq->num_added = 0;

	snapshot.u32 = *(u32 *)vq->packed.vring.device;
	flags = le16_to_cpu(snapshot.flags);

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
		/* No event index in use: kick unless notifications disabled. */
		needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
		goto out;
	}

	off_wrap = le16_to_cpu(snapshot.off_wrap);

	/* Top bit is the device's wrap counter, low 15 bits the event index. */
	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
	event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
	/*
	 * If the device's wrap counter differs from ours, the event index
	 * refers to the previous lap of the ring; shift it back by one ring
	 * size so vring_need_event() compares indices on the same scale.
	 */
	if (wrap_counter != vq->packed.avail_wrap_counter)
		event_idx -= vq->packed.vring.num;

	needs_kick = vring_need_event(event_idx, new, old);
out:
	END_USE(vq);
	return needs_kick;
}
12908c2ecf20Sopenharmony_ci
/*
 * Detach the buffer chain identified by @id from a packed virtqueue:
 * return its descriptors to the free list, unmap any DMA mappings, and
 * free (or hand back via @ctx) the indirect descriptor table.
 *
 * The caller must have grabbed desc_state[id].data beforehand, since this
 * clears it.
 */
static void detach_buf_packed(struct vring_virtqueue *vq,
			      unsigned int id, void **ctx)
{
	struct vring_desc_state_packed *state = NULL;
	struct vring_packed_desc *desc;
	unsigned int i, curr;

	state = &vq->packed.desc_state[id];

	/* Clear data ptr. */
	state->data = NULL;

	/* Splice the chain back onto the head of the free list. */
	vq->packed.desc_state[state->last].next = vq->free_head;
	vq->free_head = id;
	vq->vq.num_free += state->num;

	if (unlikely(vq->use_dma_api)) {
		/* Walk the chain via desc_state .next links, unmapping each. */
		curr = id;
		for (i = 0; i < state->num; i++) {
			vring_unmap_state_packed(vq,
				&vq->packed.desc_extra[curr]);
			curr = vq->packed.desc_state[curr].next;
		}
	}

	if (vq->indirect) {
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		desc = state->indir_desc;
		if (!desc)
			return;

		if (vq->use_dma_api) {
			/* desc_extra[id].len holds the table size in bytes. */
			len = vq->packed.desc_extra[id].len;
			for (i = 0; i < len / sizeof(struct vring_packed_desc);
					i++)
				vring_unmap_desc_packed(vq, &desc[i]);
		}
		kfree(desc);
		state->indir_desc = NULL;
	} else if (ctx) {
		/* Without indirect, the field doubles as opaque caller ctx. */
		*ctx = state->indir_desc;
	}
}
13368c2ecf20Sopenharmony_ci
13378c2ecf20Sopenharmony_cistatic inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
13388c2ecf20Sopenharmony_ci				       u16 idx, bool used_wrap_counter)
13398c2ecf20Sopenharmony_ci{
13408c2ecf20Sopenharmony_ci	bool avail, used;
13418c2ecf20Sopenharmony_ci	u16 flags;
13428c2ecf20Sopenharmony_ci
13438c2ecf20Sopenharmony_ci	flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
13448c2ecf20Sopenharmony_ci	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
13458c2ecf20Sopenharmony_ci	used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
13468c2ecf20Sopenharmony_ci
13478c2ecf20Sopenharmony_ci	return avail == used && used == used_wrap_counter;
13488c2ecf20Sopenharmony_ci}
13498c2ecf20Sopenharmony_ci
13508c2ecf20Sopenharmony_cistatic inline bool more_used_packed(const struct vring_virtqueue *vq)
13518c2ecf20Sopenharmony_ci{
13528c2ecf20Sopenharmony_ci	return is_used_desc_packed(vq, vq->last_used_idx,
13538c2ecf20Sopenharmony_ci			vq->packed.used_wrap_counter);
13548c2ecf20Sopenharmony_ci}
13558c2ecf20Sopenharmony_ci
/*
 * Pop the next completed buffer from a packed virtqueue.
 *
 * @len is set to the number of bytes written by the device; @ctx (if
 * non-NULL) receives the per-buffer context stored at add time.  Returns
 * the token passed to virtqueue_add*, or NULL if the queue is broken,
 * empty, or the ring state is corrupt (BAD_RING marks it broken).
 */
static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
					  unsigned int *len,
					  void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used, id;
	void *ret;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_packed(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used elements after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = vq->last_used_idx;
	id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
	*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);

	/* Sanity-check the device-supplied buffer id before indexing. */
	if (unlikely(id >= vq->packed.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", id);
		return NULL;
	}
	if (unlikely(!vq->packed.desc_state[id].data)) {
		BAD_RING(vq, "id %u is not a head!\n", id);
		return NULL;
	}

	/* detach_buf_packed clears data, so grab it now. */
	ret = vq->packed.desc_state[id].data;
	detach_buf_packed(vq, id, ctx);

	/* Advance past the whole chain; flip wrap counter on ring wrap. */
	vq->last_used_idx += vq->packed.desc_state[id].num;
	if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
		vq->last_used_idx -= vq->packed.vring.num;
		vq->packed.used_wrap_counter ^= 1;
	}

	/*
	 * If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call.
	 */
	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
		virtio_store_mb(vq->weak_barriers,
				&vq->packed.vring.driver->off_wrap,
				cpu_to_le16(vq->last_used_idx |
					(vq->packed.used_wrap_counter <<
					 VRING_PACKED_EVENT_F_WRAP_CTR)));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}
14208c2ecf20Sopenharmony_ci
14218c2ecf20Sopenharmony_cistatic void virtqueue_disable_cb_packed(struct virtqueue *_vq)
14228c2ecf20Sopenharmony_ci{
14238c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
14248c2ecf20Sopenharmony_ci
14258c2ecf20Sopenharmony_ci	if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
14268c2ecf20Sopenharmony_ci		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
14278c2ecf20Sopenharmony_ci		vq->packed.vring.driver->flags =
14288c2ecf20Sopenharmony_ci			cpu_to_le16(vq->packed.event_flags_shadow);
14298c2ecf20Sopenharmony_ci	}
14308c2ecf20Sopenharmony_ci}
14318c2ecf20Sopenharmony_ci
/*
 * Re-enable notifications on a packed virtqueue.
 *
 * Returns an opaque value (last_used_idx | wrap counter in the top bit)
 * for a later virtqueue_poll_packed() call, so the caller can detect
 * buffers that became used between enabling and polling.
 */
static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/*
	 * We optimistically turn back on interrupts, then check if there was
	 * more to do.
	 */

	if (vq->event) {
		/* With event idx support, publish where we want an event. */
		vq->packed.vring.driver->off_wrap =
			cpu_to_le16(vq->last_used_idx |
				(vq->packed.used_wrap_counter <<
				 VRING_PACKED_EVENT_F_WRAP_CTR));
		/*
		 * We need to update event offset and event wrap
		 * counter first before updating event flags.
		 */
		virtio_wmb(vq->weak_barriers);
	}

	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
		/* DESC mode when event idx is available, plain ENABLE otherwise. */
		vq->packed.event_flags_shadow = vq->event ?
				VRING_PACKED_EVENT_FLAG_DESC :
				VRING_PACKED_EVENT_FLAG_ENABLE;
		vq->packed.vring.driver->flags =
				cpu_to_le16(vq->packed.event_flags_shadow);
	}

	END_USE(vq);
	/* Snapshot of used index + wrap counter for virtqueue_poll_packed(). */
	return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
			VRING_PACKED_EVENT_F_WRAP_CTR);
}
14678c2ecf20Sopenharmony_ci
14688c2ecf20Sopenharmony_cistatic bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
14698c2ecf20Sopenharmony_ci{
14708c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
14718c2ecf20Sopenharmony_ci	bool wrap_counter;
14728c2ecf20Sopenharmony_ci	u16 used_idx;
14738c2ecf20Sopenharmony_ci
14748c2ecf20Sopenharmony_ci	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
14758c2ecf20Sopenharmony_ci	used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
14768c2ecf20Sopenharmony_ci
14778c2ecf20Sopenharmony_ci	return is_used_desc_packed(vq, used_idx, wrap_counter);
14788c2ecf20Sopenharmony_ci}
14798c2ecf20Sopenharmony_ci
/*
 * Re-enable notifications, but ask the device to delay the event until
 * roughly 3/4 of the in-flight buffers have been used (when event idx
 * is supported).  Returns false if used buffers are already pending —
 * the caller should then process them instead of waiting for an event.
 */
static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 used_idx, wrap_counter;
	u16 bufs;

	START_USE(vq);

	/*
	 * We optimistically turn back on interrupts, then check if there was
	 * more to do.
	 */

	if (vq->event) {
		/* TODO: tune this threshold */
		bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
		wrap_counter = vq->packed.used_wrap_counter;

		/* Event index may land past the ring end: wrap it around. */
		used_idx = vq->last_used_idx + bufs;
		if (used_idx >= vq->packed.vring.num) {
			used_idx -= vq->packed.vring.num;
			wrap_counter ^= 1;
		}

		vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
			(wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));

		/*
		 * We need to update event offset and event wrap
		 * counter first before updating event flags.
		 */
		virtio_wmb(vq->weak_barriers);
	}

	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = vq->event ?
				VRING_PACKED_EVENT_FLAG_DESC :
				VRING_PACKED_EVENT_FLAG_ENABLE;
		vq->packed.vring.driver->flags =
				cpu_to_le16(vq->packed.event_flags_shadow);
	}

	/*
	 * We need to update event suppression structure first
	 * before re-checking for more used buffers.
	 */
	virtio_mb(vq->weak_barriers);

	/* A buffer was used meanwhile: report it so the caller re-polls. */
	if (is_used_desc_packed(vq,
				vq->last_used_idx,
				vq->packed.used_wrap_counter)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
15388c2ecf20Sopenharmony_ci
15398c2ecf20Sopenharmony_cistatic void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
15408c2ecf20Sopenharmony_ci{
15418c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
15428c2ecf20Sopenharmony_ci	unsigned int i;
15438c2ecf20Sopenharmony_ci	void *buf;
15448c2ecf20Sopenharmony_ci
15458c2ecf20Sopenharmony_ci	START_USE(vq);
15468c2ecf20Sopenharmony_ci
15478c2ecf20Sopenharmony_ci	for (i = 0; i < vq->packed.vring.num; i++) {
15488c2ecf20Sopenharmony_ci		if (!vq->packed.desc_state[i].data)
15498c2ecf20Sopenharmony_ci			continue;
15508c2ecf20Sopenharmony_ci		/* detach_buf clears data, so grab it now. */
15518c2ecf20Sopenharmony_ci		buf = vq->packed.desc_state[i].data;
15528c2ecf20Sopenharmony_ci		detach_buf_packed(vq, i, NULL);
15538c2ecf20Sopenharmony_ci		END_USE(vq);
15548c2ecf20Sopenharmony_ci		return buf;
15558c2ecf20Sopenharmony_ci	}
15568c2ecf20Sopenharmony_ci	/* That should have freed everything. */
15578c2ecf20Sopenharmony_ci	BUG_ON(vq->vq.num_free != vq->packed.vring.num);
15588c2ecf20Sopenharmony_ci
15598c2ecf20Sopenharmony_ci	END_USE(vq);
15608c2ecf20Sopenharmony_ci	return NULL;
15618c2ecf20Sopenharmony_ci}
15628c2ecf20Sopenharmony_ci
15638c2ecf20Sopenharmony_cistatic struct virtqueue *vring_create_virtqueue_packed(
15648c2ecf20Sopenharmony_ci	unsigned int index,
15658c2ecf20Sopenharmony_ci	unsigned int num,
15668c2ecf20Sopenharmony_ci	unsigned int vring_align,
15678c2ecf20Sopenharmony_ci	struct virtio_device *vdev,
15688c2ecf20Sopenharmony_ci	bool weak_barriers,
15698c2ecf20Sopenharmony_ci	bool may_reduce_num,
15708c2ecf20Sopenharmony_ci	bool context,
15718c2ecf20Sopenharmony_ci	bool (*notify)(struct virtqueue *),
15728c2ecf20Sopenharmony_ci	void (*callback)(struct virtqueue *),
15738c2ecf20Sopenharmony_ci	const char *name)
15748c2ecf20Sopenharmony_ci{
15758c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq;
15768c2ecf20Sopenharmony_ci	struct vring_packed_desc *ring;
15778c2ecf20Sopenharmony_ci	struct vring_packed_desc_event *driver, *device;
15788c2ecf20Sopenharmony_ci	dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
15798c2ecf20Sopenharmony_ci	size_t ring_size_in_bytes, event_size_in_bytes;
15808c2ecf20Sopenharmony_ci	unsigned int i;
15818c2ecf20Sopenharmony_ci
15828c2ecf20Sopenharmony_ci	ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
15838c2ecf20Sopenharmony_ci
15848c2ecf20Sopenharmony_ci	ring = vring_alloc_queue(vdev, ring_size_in_bytes,
15858c2ecf20Sopenharmony_ci				 &ring_dma_addr,
15868c2ecf20Sopenharmony_ci				 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
15878c2ecf20Sopenharmony_ci	if (!ring)
15888c2ecf20Sopenharmony_ci		goto err_ring;
15898c2ecf20Sopenharmony_ci
15908c2ecf20Sopenharmony_ci	event_size_in_bytes = sizeof(struct vring_packed_desc_event);
15918c2ecf20Sopenharmony_ci
15928c2ecf20Sopenharmony_ci	driver = vring_alloc_queue(vdev, event_size_in_bytes,
15938c2ecf20Sopenharmony_ci				   &driver_event_dma_addr,
15948c2ecf20Sopenharmony_ci				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
15958c2ecf20Sopenharmony_ci	if (!driver)
15968c2ecf20Sopenharmony_ci		goto err_driver;
15978c2ecf20Sopenharmony_ci
15988c2ecf20Sopenharmony_ci	device = vring_alloc_queue(vdev, event_size_in_bytes,
15998c2ecf20Sopenharmony_ci				   &device_event_dma_addr,
16008c2ecf20Sopenharmony_ci				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
16018c2ecf20Sopenharmony_ci	if (!device)
16028c2ecf20Sopenharmony_ci		goto err_device;
16038c2ecf20Sopenharmony_ci
16048c2ecf20Sopenharmony_ci	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
16058c2ecf20Sopenharmony_ci	if (!vq)
16068c2ecf20Sopenharmony_ci		goto err_vq;
16078c2ecf20Sopenharmony_ci
16088c2ecf20Sopenharmony_ci	vq->vq.callback = callback;
16098c2ecf20Sopenharmony_ci	vq->vq.vdev = vdev;
16108c2ecf20Sopenharmony_ci	vq->vq.name = name;
16118c2ecf20Sopenharmony_ci	vq->vq.num_free = num;
16128c2ecf20Sopenharmony_ci	vq->vq.index = index;
16138c2ecf20Sopenharmony_ci	vq->we_own_ring = true;
16148c2ecf20Sopenharmony_ci	vq->notify = notify;
16158c2ecf20Sopenharmony_ci	vq->weak_barriers = weak_barriers;
16168c2ecf20Sopenharmony_ci	vq->broken = false;
16178c2ecf20Sopenharmony_ci	vq->last_used_idx = 0;
16188c2ecf20Sopenharmony_ci	vq->num_added = 0;
16198c2ecf20Sopenharmony_ci	vq->packed_ring = true;
16208c2ecf20Sopenharmony_ci	vq->use_dma_api = vring_use_dma_api(vdev);
16218c2ecf20Sopenharmony_ci#ifdef DEBUG
16228c2ecf20Sopenharmony_ci	vq->in_use = false;
16238c2ecf20Sopenharmony_ci	vq->last_add_time_valid = false;
16248c2ecf20Sopenharmony_ci#endif
16258c2ecf20Sopenharmony_ci
16268c2ecf20Sopenharmony_ci	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
16278c2ecf20Sopenharmony_ci		!context;
16288c2ecf20Sopenharmony_ci	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
16298c2ecf20Sopenharmony_ci
16308c2ecf20Sopenharmony_ci	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
16318c2ecf20Sopenharmony_ci		vq->weak_barriers = false;
16328c2ecf20Sopenharmony_ci
16338c2ecf20Sopenharmony_ci	vq->packed.ring_dma_addr = ring_dma_addr;
16348c2ecf20Sopenharmony_ci	vq->packed.driver_event_dma_addr = driver_event_dma_addr;
16358c2ecf20Sopenharmony_ci	vq->packed.device_event_dma_addr = device_event_dma_addr;
16368c2ecf20Sopenharmony_ci
16378c2ecf20Sopenharmony_ci	vq->packed.ring_size_in_bytes = ring_size_in_bytes;
16388c2ecf20Sopenharmony_ci	vq->packed.event_size_in_bytes = event_size_in_bytes;
16398c2ecf20Sopenharmony_ci
16408c2ecf20Sopenharmony_ci	vq->packed.vring.num = num;
16418c2ecf20Sopenharmony_ci	vq->packed.vring.desc = ring;
16428c2ecf20Sopenharmony_ci	vq->packed.vring.driver = driver;
16438c2ecf20Sopenharmony_ci	vq->packed.vring.device = device;
16448c2ecf20Sopenharmony_ci
16458c2ecf20Sopenharmony_ci	vq->packed.next_avail_idx = 0;
16468c2ecf20Sopenharmony_ci	vq->packed.avail_wrap_counter = 1;
16478c2ecf20Sopenharmony_ci	vq->packed.used_wrap_counter = 1;
16488c2ecf20Sopenharmony_ci	vq->packed.event_flags_shadow = 0;
16498c2ecf20Sopenharmony_ci	vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
16508c2ecf20Sopenharmony_ci
16518c2ecf20Sopenharmony_ci	vq->packed.desc_state = kmalloc_array(num,
16528c2ecf20Sopenharmony_ci			sizeof(struct vring_desc_state_packed),
16538c2ecf20Sopenharmony_ci			GFP_KERNEL);
16548c2ecf20Sopenharmony_ci	if (!vq->packed.desc_state)
16558c2ecf20Sopenharmony_ci		goto err_desc_state;
16568c2ecf20Sopenharmony_ci
16578c2ecf20Sopenharmony_ci	memset(vq->packed.desc_state, 0,
16588c2ecf20Sopenharmony_ci		num * sizeof(struct vring_desc_state_packed));
16598c2ecf20Sopenharmony_ci
16608c2ecf20Sopenharmony_ci	/* Put everything in free lists. */
16618c2ecf20Sopenharmony_ci	vq->free_head = 0;
16628c2ecf20Sopenharmony_ci	for (i = 0; i < num-1; i++)
16638c2ecf20Sopenharmony_ci		vq->packed.desc_state[i].next = i + 1;
16648c2ecf20Sopenharmony_ci
16658c2ecf20Sopenharmony_ci	vq->packed.desc_extra = kmalloc_array(num,
16668c2ecf20Sopenharmony_ci			sizeof(struct vring_desc_extra_packed),
16678c2ecf20Sopenharmony_ci			GFP_KERNEL);
16688c2ecf20Sopenharmony_ci	if (!vq->packed.desc_extra)
16698c2ecf20Sopenharmony_ci		goto err_desc_extra;
16708c2ecf20Sopenharmony_ci
16718c2ecf20Sopenharmony_ci	memset(vq->packed.desc_extra, 0,
16728c2ecf20Sopenharmony_ci		num * sizeof(struct vring_desc_extra_packed));
16738c2ecf20Sopenharmony_ci
16748c2ecf20Sopenharmony_ci	/* No callback?  Tell other side not to bother us. */
16758c2ecf20Sopenharmony_ci	if (!callback) {
16768c2ecf20Sopenharmony_ci		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
16778c2ecf20Sopenharmony_ci		vq->packed.vring.driver->flags =
16788c2ecf20Sopenharmony_ci			cpu_to_le16(vq->packed.event_flags_shadow);
16798c2ecf20Sopenharmony_ci	}
16808c2ecf20Sopenharmony_ci
16818c2ecf20Sopenharmony_ci	spin_lock(&vdev->vqs_list_lock);
16828c2ecf20Sopenharmony_ci	list_add_tail(&vq->vq.list, &vdev->vqs);
16838c2ecf20Sopenharmony_ci	spin_unlock(&vdev->vqs_list_lock);
16848c2ecf20Sopenharmony_ci	return &vq->vq;
16858c2ecf20Sopenharmony_ci
16868c2ecf20Sopenharmony_cierr_desc_extra:
16878c2ecf20Sopenharmony_ci	kfree(vq->packed.desc_state);
16888c2ecf20Sopenharmony_cierr_desc_state:
16898c2ecf20Sopenharmony_ci	kfree(vq);
16908c2ecf20Sopenharmony_cierr_vq:
16918c2ecf20Sopenharmony_ci	vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
16928c2ecf20Sopenharmony_cierr_device:
16938c2ecf20Sopenharmony_ci	vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
16948c2ecf20Sopenharmony_cierr_driver:
16958c2ecf20Sopenharmony_ci	vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
16968c2ecf20Sopenharmony_cierr_ring:
16978c2ecf20Sopenharmony_ci	return NULL;
16988c2ecf20Sopenharmony_ci}
16998c2ecf20Sopenharmony_ci
17008c2ecf20Sopenharmony_ci
17018c2ecf20Sopenharmony_ci/*
17028c2ecf20Sopenharmony_ci * Generic functions and exported symbols.
17038c2ecf20Sopenharmony_ci */
17048c2ecf20Sopenharmony_ci
17058c2ecf20Sopenharmony_cistatic inline int virtqueue_add(struct virtqueue *_vq,
17068c2ecf20Sopenharmony_ci				struct scatterlist *sgs[],
17078c2ecf20Sopenharmony_ci				unsigned int total_sg,
17088c2ecf20Sopenharmony_ci				unsigned int out_sgs,
17098c2ecf20Sopenharmony_ci				unsigned int in_sgs,
17108c2ecf20Sopenharmony_ci				void *data,
17118c2ecf20Sopenharmony_ci				void *ctx,
17128c2ecf20Sopenharmony_ci				gfp_t gfp)
17138c2ecf20Sopenharmony_ci{
17148c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
17158c2ecf20Sopenharmony_ci
17168c2ecf20Sopenharmony_ci	return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
17178c2ecf20Sopenharmony_ci					out_sgs, in_sgs, data, ctx, gfp) :
17188c2ecf20Sopenharmony_ci				 virtqueue_add_split(_vq, sgs, total_sg,
17198c2ecf20Sopenharmony_ci					out_sgs, in_sgs, data, ctx, gfp);
17208c2ecf20Sopenharmony_ci}
17218c2ecf20Sopenharmony_ci
17228c2ecf20Sopenharmony_ci/**
17238c2ecf20Sopenharmony_ci * virtqueue_add_sgs - expose buffers to other end
17248c2ecf20Sopenharmony_ci * @_vq: the struct virtqueue we're talking about.
17258c2ecf20Sopenharmony_ci * @sgs: array of terminated scatterlists.
17268c2ecf20Sopenharmony_ci * @out_sgs: the number of scatterlists readable by other side
17278c2ecf20Sopenharmony_ci * @in_sgs: the number of scatterlists which are writable (after readable ones)
17288c2ecf20Sopenharmony_ci * @data: the token identifying the buffer.
17298c2ecf20Sopenharmony_ci * @gfp: how to do memory allocations (if necessary).
17308c2ecf20Sopenharmony_ci *
17318c2ecf20Sopenharmony_ci * Caller must ensure we don't call this with other virtqueue operations
17328c2ecf20Sopenharmony_ci * at the same time (except where noted).
17338c2ecf20Sopenharmony_ci *
17348c2ecf20Sopenharmony_ci * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
17358c2ecf20Sopenharmony_ci */
17368c2ecf20Sopenharmony_ciint virtqueue_add_sgs(struct virtqueue *_vq,
17378c2ecf20Sopenharmony_ci		      struct scatterlist *sgs[],
17388c2ecf20Sopenharmony_ci		      unsigned int out_sgs,
17398c2ecf20Sopenharmony_ci		      unsigned int in_sgs,
17408c2ecf20Sopenharmony_ci		      void *data,
17418c2ecf20Sopenharmony_ci		      gfp_t gfp)
17428c2ecf20Sopenharmony_ci{
17438c2ecf20Sopenharmony_ci	unsigned int i, total_sg = 0;
17448c2ecf20Sopenharmony_ci
17458c2ecf20Sopenharmony_ci	/* Count them first. */
17468c2ecf20Sopenharmony_ci	for (i = 0; i < out_sgs + in_sgs; i++) {
17478c2ecf20Sopenharmony_ci		struct scatterlist *sg;
17488c2ecf20Sopenharmony_ci
17498c2ecf20Sopenharmony_ci		for (sg = sgs[i]; sg; sg = sg_next(sg))
17508c2ecf20Sopenharmony_ci			total_sg++;
17518c2ecf20Sopenharmony_ci	}
17528c2ecf20Sopenharmony_ci	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
17538c2ecf20Sopenharmony_ci			     data, NULL, gfp);
17548c2ecf20Sopenharmony_ci}
17558c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_add_sgs);
17568c2ecf20Sopenharmony_ci
17578c2ecf20Sopenharmony_ci/**
17588c2ecf20Sopenharmony_ci * virtqueue_add_outbuf - expose output buffers to other end
17598c2ecf20Sopenharmony_ci * @vq: the struct virtqueue we're talking about.
17608c2ecf20Sopenharmony_ci * @sg: scatterlist (must be well-formed and terminated!)
17618c2ecf20Sopenharmony_ci * @num: the number of entries in @sg readable by other side
17628c2ecf20Sopenharmony_ci * @data: the token identifying the buffer.
17638c2ecf20Sopenharmony_ci * @gfp: how to do memory allocations (if necessary).
17648c2ecf20Sopenharmony_ci *
17658c2ecf20Sopenharmony_ci * Caller must ensure we don't call this with other virtqueue operations
17668c2ecf20Sopenharmony_ci * at the same time (except where noted).
17678c2ecf20Sopenharmony_ci *
17688c2ecf20Sopenharmony_ci * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
17698c2ecf20Sopenharmony_ci */
17708c2ecf20Sopenharmony_ciint virtqueue_add_outbuf(struct virtqueue *vq,
17718c2ecf20Sopenharmony_ci			 struct scatterlist *sg, unsigned int num,
17728c2ecf20Sopenharmony_ci			 void *data,
17738c2ecf20Sopenharmony_ci			 gfp_t gfp)
17748c2ecf20Sopenharmony_ci{
17758c2ecf20Sopenharmony_ci	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
17768c2ecf20Sopenharmony_ci}
17778c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
17788c2ecf20Sopenharmony_ci
17798c2ecf20Sopenharmony_ci/**
17808c2ecf20Sopenharmony_ci * virtqueue_add_inbuf - expose input buffers to other end
17818c2ecf20Sopenharmony_ci * @vq: the struct virtqueue we're talking about.
17828c2ecf20Sopenharmony_ci * @sg: scatterlist (must be well-formed and terminated!)
17838c2ecf20Sopenharmony_ci * @num: the number of entries in @sg writable by other side
17848c2ecf20Sopenharmony_ci * @data: the token identifying the buffer.
17858c2ecf20Sopenharmony_ci * @gfp: how to do memory allocations (if necessary).
17868c2ecf20Sopenharmony_ci *
17878c2ecf20Sopenharmony_ci * Caller must ensure we don't call this with other virtqueue operations
17888c2ecf20Sopenharmony_ci * at the same time (except where noted).
17898c2ecf20Sopenharmony_ci *
17908c2ecf20Sopenharmony_ci * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
17918c2ecf20Sopenharmony_ci */
17928c2ecf20Sopenharmony_ciint virtqueue_add_inbuf(struct virtqueue *vq,
17938c2ecf20Sopenharmony_ci			struct scatterlist *sg, unsigned int num,
17948c2ecf20Sopenharmony_ci			void *data,
17958c2ecf20Sopenharmony_ci			gfp_t gfp)
17968c2ecf20Sopenharmony_ci{
17978c2ecf20Sopenharmony_ci	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
17988c2ecf20Sopenharmony_ci}
17998c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
18008c2ecf20Sopenharmony_ci
18018c2ecf20Sopenharmony_ci/**
18028c2ecf20Sopenharmony_ci * virtqueue_add_inbuf_ctx - expose input buffers to other end
18038c2ecf20Sopenharmony_ci * @vq: the struct virtqueue we're talking about.
18048c2ecf20Sopenharmony_ci * @sg: scatterlist (must be well-formed and terminated!)
18058c2ecf20Sopenharmony_ci * @num: the number of entries in @sg writable by other side
18068c2ecf20Sopenharmony_ci * @data: the token identifying the buffer.
18078c2ecf20Sopenharmony_ci * @ctx: extra context for the token
18088c2ecf20Sopenharmony_ci * @gfp: how to do memory allocations (if necessary).
18098c2ecf20Sopenharmony_ci *
18108c2ecf20Sopenharmony_ci * Caller must ensure we don't call this with other virtqueue operations
18118c2ecf20Sopenharmony_ci * at the same time (except where noted).
18128c2ecf20Sopenharmony_ci *
18138c2ecf20Sopenharmony_ci * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
18148c2ecf20Sopenharmony_ci */
18158c2ecf20Sopenharmony_ciint virtqueue_add_inbuf_ctx(struct virtqueue *vq,
18168c2ecf20Sopenharmony_ci			struct scatterlist *sg, unsigned int num,
18178c2ecf20Sopenharmony_ci			void *data,
18188c2ecf20Sopenharmony_ci			void *ctx,
18198c2ecf20Sopenharmony_ci			gfp_t gfp)
18208c2ecf20Sopenharmony_ci{
18218c2ecf20Sopenharmony_ci	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
18228c2ecf20Sopenharmony_ci}
18238c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
18248c2ecf20Sopenharmony_ci
18258c2ecf20Sopenharmony_ci/**
18268c2ecf20Sopenharmony_ci * virtqueue_kick_prepare - first half of split virtqueue_kick call.
18278c2ecf20Sopenharmony_ci * @_vq: the struct virtqueue
18288c2ecf20Sopenharmony_ci *
18298c2ecf20Sopenharmony_ci * Instead of virtqueue_kick(), you can do:
18308c2ecf20Sopenharmony_ci *	if (virtqueue_kick_prepare(vq))
18318c2ecf20Sopenharmony_ci *		virtqueue_notify(vq);
18328c2ecf20Sopenharmony_ci *
18338c2ecf20Sopenharmony_ci * This is sometimes useful because the virtqueue_kick_prepare() needs
18348c2ecf20Sopenharmony_ci * to be serialized, but the actual virtqueue_notify() call does not.
18358c2ecf20Sopenharmony_ci */
18368c2ecf20Sopenharmony_cibool virtqueue_kick_prepare(struct virtqueue *_vq)
18378c2ecf20Sopenharmony_ci{
18388c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
18398c2ecf20Sopenharmony_ci
18408c2ecf20Sopenharmony_ci	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
18418c2ecf20Sopenharmony_ci				 virtqueue_kick_prepare_split(_vq);
18428c2ecf20Sopenharmony_ci}
18438c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
18448c2ecf20Sopenharmony_ci
18458c2ecf20Sopenharmony_ci/**
18468c2ecf20Sopenharmony_ci * virtqueue_notify - second half of split virtqueue_kick call.
18478c2ecf20Sopenharmony_ci * @_vq: the struct virtqueue
18488c2ecf20Sopenharmony_ci *
18498c2ecf20Sopenharmony_ci * This does not need to be serialized.
18508c2ecf20Sopenharmony_ci *
18518c2ecf20Sopenharmony_ci * Returns false if host notify failed or queue is broken, otherwise true.
18528c2ecf20Sopenharmony_ci */
18538c2ecf20Sopenharmony_cibool virtqueue_notify(struct virtqueue *_vq)
18548c2ecf20Sopenharmony_ci{
18558c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
18568c2ecf20Sopenharmony_ci
18578c2ecf20Sopenharmony_ci	if (unlikely(vq->broken))
18588c2ecf20Sopenharmony_ci		return false;
18598c2ecf20Sopenharmony_ci
18608c2ecf20Sopenharmony_ci	/* Prod other side to tell it about changes. */
18618c2ecf20Sopenharmony_ci	if (!vq->notify(_vq)) {
18628c2ecf20Sopenharmony_ci		vq->broken = true;
18638c2ecf20Sopenharmony_ci		return false;
18648c2ecf20Sopenharmony_ci	}
18658c2ecf20Sopenharmony_ci	return true;
18668c2ecf20Sopenharmony_ci}
18678c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_notify);
18688c2ecf20Sopenharmony_ci
18698c2ecf20Sopenharmony_ci/**
18708c2ecf20Sopenharmony_ci * virtqueue_kick - update after add_buf
18718c2ecf20Sopenharmony_ci * @vq: the struct virtqueue
18728c2ecf20Sopenharmony_ci *
18738c2ecf20Sopenharmony_ci * After one or more virtqueue_add_* calls, invoke this to kick
18748c2ecf20Sopenharmony_ci * the other side.
18758c2ecf20Sopenharmony_ci *
18768c2ecf20Sopenharmony_ci * Caller must ensure we don't call this with other virtqueue
18778c2ecf20Sopenharmony_ci * operations at the same time (except where noted).
18788c2ecf20Sopenharmony_ci *
18798c2ecf20Sopenharmony_ci * Returns false if kick failed, otherwise true.
18808c2ecf20Sopenharmony_ci */
18818c2ecf20Sopenharmony_cibool virtqueue_kick(struct virtqueue *vq)
18828c2ecf20Sopenharmony_ci{
18838c2ecf20Sopenharmony_ci	if (virtqueue_kick_prepare(vq))
18848c2ecf20Sopenharmony_ci		return virtqueue_notify(vq);
18858c2ecf20Sopenharmony_ci	return true;
18868c2ecf20Sopenharmony_ci}
18878c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_kick);
18888c2ecf20Sopenharmony_ci
18898c2ecf20Sopenharmony_ci/**
18908c2ecf20Sopenharmony_ci * virtqueue_get_buf - get the next used buffer
18918c2ecf20Sopenharmony_ci * @_vq: the struct virtqueue we're talking about.
18928c2ecf20Sopenharmony_ci * @len: the length written into the buffer
18938c2ecf20Sopenharmony_ci * @ctx: extra context for the token
18948c2ecf20Sopenharmony_ci *
18958c2ecf20Sopenharmony_ci * If the device wrote data into the buffer, @len will be set to the
18968c2ecf20Sopenharmony_ci * amount written.  This means you don't need to clear the buffer
18978c2ecf20Sopenharmony_ci * beforehand to ensure there's no data leakage in the case of short
18988c2ecf20Sopenharmony_ci * writes.
18998c2ecf20Sopenharmony_ci *
19008c2ecf20Sopenharmony_ci * Caller must ensure we don't call this with other virtqueue
19018c2ecf20Sopenharmony_ci * operations at the same time (except where noted).
19028c2ecf20Sopenharmony_ci *
19038c2ecf20Sopenharmony_ci * Returns NULL if there are no used buffers, or the "data" token
19048c2ecf20Sopenharmony_ci * handed to virtqueue_add_*().
19058c2ecf20Sopenharmony_ci */
19068c2ecf20Sopenharmony_civoid *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
19078c2ecf20Sopenharmony_ci			    void **ctx)
19088c2ecf20Sopenharmony_ci{
19098c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
19108c2ecf20Sopenharmony_ci
19118c2ecf20Sopenharmony_ci	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
19128c2ecf20Sopenharmony_ci				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
19138c2ecf20Sopenharmony_ci}
19148c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
19158c2ecf20Sopenharmony_ci
19168c2ecf20Sopenharmony_civoid *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
19178c2ecf20Sopenharmony_ci{
19188c2ecf20Sopenharmony_ci	return virtqueue_get_buf_ctx(_vq, len, NULL);
19198c2ecf20Sopenharmony_ci}
19208c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_get_buf);
19218c2ecf20Sopenharmony_ci/**
19228c2ecf20Sopenharmony_ci * virtqueue_disable_cb - disable callbacks
19238c2ecf20Sopenharmony_ci * @_vq: the struct virtqueue we're talking about.
19248c2ecf20Sopenharmony_ci *
19258c2ecf20Sopenharmony_ci * Note that this is not necessarily synchronous, hence unreliable and only
19268c2ecf20Sopenharmony_ci * useful as an optimization.
19278c2ecf20Sopenharmony_ci *
19288c2ecf20Sopenharmony_ci * Unlike other operations, this need not be serialized.
19298c2ecf20Sopenharmony_ci */
19308c2ecf20Sopenharmony_civoid virtqueue_disable_cb(struct virtqueue *_vq)
19318c2ecf20Sopenharmony_ci{
19328c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
19338c2ecf20Sopenharmony_ci
19348c2ecf20Sopenharmony_ci	if (vq->packed_ring)
19358c2ecf20Sopenharmony_ci		virtqueue_disable_cb_packed(_vq);
19368c2ecf20Sopenharmony_ci	else
19378c2ecf20Sopenharmony_ci		virtqueue_disable_cb_split(_vq);
19388c2ecf20Sopenharmony_ci}
19398c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_disable_cb);
19408c2ecf20Sopenharmony_ci
19418c2ecf20Sopenharmony_ci/**
19428c2ecf20Sopenharmony_ci * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
19438c2ecf20Sopenharmony_ci * @_vq: the struct virtqueue we're talking about.
19448c2ecf20Sopenharmony_ci *
19458c2ecf20Sopenharmony_ci * This re-enables callbacks; it returns current queue state
19468c2ecf20Sopenharmony_ci * in an opaque unsigned value. This value should be later tested by
19478c2ecf20Sopenharmony_ci * virtqueue_poll, to detect a possible race between the driver checking for
19488c2ecf20Sopenharmony_ci * more work, and enabling callbacks.
19498c2ecf20Sopenharmony_ci *
19508c2ecf20Sopenharmony_ci * Caller must ensure we don't call this with other virtqueue
19518c2ecf20Sopenharmony_ci * operations at the same time (except where noted).
19528c2ecf20Sopenharmony_ci */
19538c2ecf20Sopenharmony_ciunsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
19548c2ecf20Sopenharmony_ci{
19558c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
19568c2ecf20Sopenharmony_ci
19578c2ecf20Sopenharmony_ci	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
19588c2ecf20Sopenharmony_ci				 virtqueue_enable_cb_prepare_split(_vq);
19598c2ecf20Sopenharmony_ci}
19608c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
19618c2ecf20Sopenharmony_ci
19628c2ecf20Sopenharmony_ci/**
19638c2ecf20Sopenharmony_ci * virtqueue_poll - query pending used buffers
19648c2ecf20Sopenharmony_ci * @_vq: the struct virtqueue we're talking about.
19658c2ecf20Sopenharmony_ci * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
19668c2ecf20Sopenharmony_ci *
19678c2ecf20Sopenharmony_ci * Returns "true" if there are pending used buffers in the queue.
19688c2ecf20Sopenharmony_ci *
19698c2ecf20Sopenharmony_ci * This does not need to be serialized.
19708c2ecf20Sopenharmony_ci */
19718c2ecf20Sopenharmony_cibool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
19728c2ecf20Sopenharmony_ci{
19738c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
19748c2ecf20Sopenharmony_ci
19758c2ecf20Sopenharmony_ci	if (unlikely(vq->broken))
19768c2ecf20Sopenharmony_ci		return false;
19778c2ecf20Sopenharmony_ci
19788c2ecf20Sopenharmony_ci	virtio_mb(vq->weak_barriers);
19798c2ecf20Sopenharmony_ci	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
19808c2ecf20Sopenharmony_ci				 virtqueue_poll_split(_vq, last_used_idx);
19818c2ecf20Sopenharmony_ci}
19828c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_poll);
19838c2ecf20Sopenharmony_ci
19848c2ecf20Sopenharmony_ci/**
19858c2ecf20Sopenharmony_ci * virtqueue_enable_cb - restart callbacks after disable_cb.
19868c2ecf20Sopenharmony_ci * @_vq: the struct virtqueue we're talking about.
19878c2ecf20Sopenharmony_ci *
19888c2ecf20Sopenharmony_ci * This re-enables callbacks; it returns "false" if there are pending
19898c2ecf20Sopenharmony_ci * buffers in the queue, to detect a possible race between the driver
19908c2ecf20Sopenharmony_ci * checking for more work, and enabling callbacks.
19918c2ecf20Sopenharmony_ci *
19928c2ecf20Sopenharmony_ci * Caller must ensure we don't call this with other virtqueue
19938c2ecf20Sopenharmony_ci * operations at the same time (except where noted).
19948c2ecf20Sopenharmony_ci */
19958c2ecf20Sopenharmony_cibool virtqueue_enable_cb(struct virtqueue *_vq)
19968c2ecf20Sopenharmony_ci{
19978c2ecf20Sopenharmony_ci	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
19988c2ecf20Sopenharmony_ci
19998c2ecf20Sopenharmony_ci	return !virtqueue_poll(_vq, last_used_idx);
20008c2ecf20Sopenharmony_ci}
20018c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_enable_cb);
20028c2ecf20Sopenharmony_ci
20038c2ecf20Sopenharmony_ci/**
20048c2ecf20Sopenharmony_ci * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
20058c2ecf20Sopenharmony_ci * @_vq: the struct virtqueue we're talking about.
20068c2ecf20Sopenharmony_ci *
20078c2ecf20Sopenharmony_ci * This re-enables callbacks but hints to the other side to delay
20088c2ecf20Sopenharmony_ci * interrupts until most of the available buffers have been processed;
20098c2ecf20Sopenharmony_ci * it returns "false" if there are many pending buffers in the queue,
20108c2ecf20Sopenharmony_ci * to detect a possible race between the driver checking for more work,
20118c2ecf20Sopenharmony_ci * and enabling callbacks.
20128c2ecf20Sopenharmony_ci *
20138c2ecf20Sopenharmony_ci * Caller must ensure we don't call this with other virtqueue
20148c2ecf20Sopenharmony_ci * operations at the same time (except where noted).
20158c2ecf20Sopenharmony_ci */
20168c2ecf20Sopenharmony_cibool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
20178c2ecf20Sopenharmony_ci{
20188c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
20198c2ecf20Sopenharmony_ci
20208c2ecf20Sopenharmony_ci	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
20218c2ecf20Sopenharmony_ci				 virtqueue_enable_cb_delayed_split(_vq);
20228c2ecf20Sopenharmony_ci}
20238c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
20248c2ecf20Sopenharmony_ci
20258c2ecf20Sopenharmony_ci/**
20268c2ecf20Sopenharmony_ci * virtqueue_detach_unused_buf - detach first unused buffer
20278c2ecf20Sopenharmony_ci * @_vq: the struct virtqueue we're talking about.
20288c2ecf20Sopenharmony_ci *
20298c2ecf20Sopenharmony_ci * Returns NULL or the "data" token handed to virtqueue_add_*().
20308c2ecf20Sopenharmony_ci * This is not valid on an active queue; it is useful only for device
20318c2ecf20Sopenharmony_ci * shutdown.
20328c2ecf20Sopenharmony_ci */
20338c2ecf20Sopenharmony_civoid *virtqueue_detach_unused_buf(struct virtqueue *_vq)
20348c2ecf20Sopenharmony_ci{
20358c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
20368c2ecf20Sopenharmony_ci
20378c2ecf20Sopenharmony_ci	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
20388c2ecf20Sopenharmony_ci				 virtqueue_detach_unused_buf_split(_vq);
20398c2ecf20Sopenharmony_ci}
20408c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
20418c2ecf20Sopenharmony_ci
20428c2ecf20Sopenharmony_cistatic inline bool more_used(const struct vring_virtqueue *vq)
20438c2ecf20Sopenharmony_ci{
20448c2ecf20Sopenharmony_ci	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
20458c2ecf20Sopenharmony_ci}
20468c2ecf20Sopenharmony_ci
/*
 * Interrupt handler shared by all transports: confirm the interrupt is
 * ours, then run the virtqueue's callback.
 */
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* No used buffers pending: the interrupt wasn't for us. */
	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	/*
	 * A broken queue still claims the interrupt (there was work), but
	 * we must not run callbacks on it.
	 */
	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
20668c2ecf20Sopenharmony_ci
20678c2ecf20Sopenharmony_ci/* Only available for split ring */
20688c2ecf20Sopenharmony_cistruct virtqueue *__vring_new_virtqueue(unsigned int index,
20698c2ecf20Sopenharmony_ci					struct vring vring,
20708c2ecf20Sopenharmony_ci					struct virtio_device *vdev,
20718c2ecf20Sopenharmony_ci					bool weak_barriers,
20728c2ecf20Sopenharmony_ci					bool context,
20738c2ecf20Sopenharmony_ci					bool (*notify)(struct virtqueue *),
20748c2ecf20Sopenharmony_ci					void (*callback)(struct virtqueue *),
20758c2ecf20Sopenharmony_ci					const char *name)
20768c2ecf20Sopenharmony_ci{
20778c2ecf20Sopenharmony_ci	unsigned int i;
20788c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq;
20798c2ecf20Sopenharmony_ci
20808c2ecf20Sopenharmony_ci	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
20818c2ecf20Sopenharmony_ci		return NULL;
20828c2ecf20Sopenharmony_ci
20838c2ecf20Sopenharmony_ci	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
20848c2ecf20Sopenharmony_ci	if (!vq)
20858c2ecf20Sopenharmony_ci		return NULL;
20868c2ecf20Sopenharmony_ci
20878c2ecf20Sopenharmony_ci	vq->packed_ring = false;
20888c2ecf20Sopenharmony_ci	vq->vq.callback = callback;
20898c2ecf20Sopenharmony_ci	vq->vq.vdev = vdev;
20908c2ecf20Sopenharmony_ci	vq->vq.name = name;
20918c2ecf20Sopenharmony_ci	vq->vq.num_free = vring.num;
20928c2ecf20Sopenharmony_ci	vq->vq.index = index;
20938c2ecf20Sopenharmony_ci	vq->we_own_ring = false;
20948c2ecf20Sopenharmony_ci	vq->notify = notify;
20958c2ecf20Sopenharmony_ci	vq->weak_barriers = weak_barriers;
20968c2ecf20Sopenharmony_ci	vq->broken = false;
20978c2ecf20Sopenharmony_ci	vq->last_used_idx = 0;
20988c2ecf20Sopenharmony_ci	vq->num_added = 0;
20998c2ecf20Sopenharmony_ci	vq->use_dma_api = vring_use_dma_api(vdev);
21008c2ecf20Sopenharmony_ci#ifdef DEBUG
21018c2ecf20Sopenharmony_ci	vq->in_use = false;
21028c2ecf20Sopenharmony_ci	vq->last_add_time_valid = false;
21038c2ecf20Sopenharmony_ci#endif
21048c2ecf20Sopenharmony_ci
21058c2ecf20Sopenharmony_ci	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
21068c2ecf20Sopenharmony_ci		!context;
21078c2ecf20Sopenharmony_ci	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
21088c2ecf20Sopenharmony_ci
21098c2ecf20Sopenharmony_ci	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
21108c2ecf20Sopenharmony_ci		vq->weak_barriers = false;
21118c2ecf20Sopenharmony_ci
21128c2ecf20Sopenharmony_ci	vq->split.queue_dma_addr = 0;
21138c2ecf20Sopenharmony_ci	vq->split.queue_size_in_bytes = 0;
21148c2ecf20Sopenharmony_ci
21158c2ecf20Sopenharmony_ci	vq->split.vring = vring;
21168c2ecf20Sopenharmony_ci	vq->split.avail_flags_shadow = 0;
21178c2ecf20Sopenharmony_ci	vq->split.avail_idx_shadow = 0;
21188c2ecf20Sopenharmony_ci
21198c2ecf20Sopenharmony_ci	/* No callback?  Tell other side not to bother us. */
21208c2ecf20Sopenharmony_ci	if (!callback) {
21218c2ecf20Sopenharmony_ci		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
21228c2ecf20Sopenharmony_ci		if (!vq->event)
21238c2ecf20Sopenharmony_ci			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
21248c2ecf20Sopenharmony_ci					vq->split.avail_flags_shadow);
21258c2ecf20Sopenharmony_ci	}
21268c2ecf20Sopenharmony_ci
21278c2ecf20Sopenharmony_ci	vq->split.desc_state = kmalloc_array(vring.num,
21288c2ecf20Sopenharmony_ci			sizeof(struct vring_desc_state_split), GFP_KERNEL);
21298c2ecf20Sopenharmony_ci	if (!vq->split.desc_state) {
21308c2ecf20Sopenharmony_ci		kfree(vq);
21318c2ecf20Sopenharmony_ci		return NULL;
21328c2ecf20Sopenharmony_ci	}
21338c2ecf20Sopenharmony_ci
21348c2ecf20Sopenharmony_ci	/* Put everything in free lists. */
21358c2ecf20Sopenharmony_ci	vq->free_head = 0;
21368c2ecf20Sopenharmony_ci	for (i = 0; i < vring.num-1; i++)
21378c2ecf20Sopenharmony_ci		vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
21388c2ecf20Sopenharmony_ci	memset(vq->split.desc_state, 0, vring.num *
21398c2ecf20Sopenharmony_ci			sizeof(struct vring_desc_state_split));
21408c2ecf20Sopenharmony_ci
21418c2ecf20Sopenharmony_ci	spin_lock(&vdev->vqs_list_lock);
21428c2ecf20Sopenharmony_ci	list_add_tail(&vq->vq.list, &vdev->vqs);
21438c2ecf20Sopenharmony_ci	spin_unlock(&vdev->vqs_list_lock);
21448c2ecf20Sopenharmony_ci	return &vq->vq;
21458c2ecf20Sopenharmony_ci}
21468c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(__vring_new_virtqueue);
21478c2ecf20Sopenharmony_ci
21488c2ecf20Sopenharmony_cistruct virtqueue *vring_create_virtqueue(
21498c2ecf20Sopenharmony_ci	unsigned int index,
21508c2ecf20Sopenharmony_ci	unsigned int num,
21518c2ecf20Sopenharmony_ci	unsigned int vring_align,
21528c2ecf20Sopenharmony_ci	struct virtio_device *vdev,
21538c2ecf20Sopenharmony_ci	bool weak_barriers,
21548c2ecf20Sopenharmony_ci	bool may_reduce_num,
21558c2ecf20Sopenharmony_ci	bool context,
21568c2ecf20Sopenharmony_ci	bool (*notify)(struct virtqueue *),
21578c2ecf20Sopenharmony_ci	void (*callback)(struct virtqueue *),
21588c2ecf20Sopenharmony_ci	const char *name)
21598c2ecf20Sopenharmony_ci{
21608c2ecf20Sopenharmony_ci
21618c2ecf20Sopenharmony_ci	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
21628c2ecf20Sopenharmony_ci		return vring_create_virtqueue_packed(index, num, vring_align,
21638c2ecf20Sopenharmony_ci				vdev, weak_barriers, may_reduce_num,
21648c2ecf20Sopenharmony_ci				context, notify, callback, name);
21658c2ecf20Sopenharmony_ci
21668c2ecf20Sopenharmony_ci	return vring_create_virtqueue_split(index, num, vring_align,
21678c2ecf20Sopenharmony_ci			vdev, weak_barriers, may_reduce_num,
21688c2ecf20Sopenharmony_ci			context, notify, callback, name);
21698c2ecf20Sopenharmony_ci}
21708c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(vring_create_virtqueue);
21718c2ecf20Sopenharmony_ci
21728c2ecf20Sopenharmony_ci/* Only available for split ring */
21738c2ecf20Sopenharmony_cistruct virtqueue *vring_new_virtqueue(unsigned int index,
21748c2ecf20Sopenharmony_ci				      unsigned int num,
21758c2ecf20Sopenharmony_ci				      unsigned int vring_align,
21768c2ecf20Sopenharmony_ci				      struct virtio_device *vdev,
21778c2ecf20Sopenharmony_ci				      bool weak_barriers,
21788c2ecf20Sopenharmony_ci				      bool context,
21798c2ecf20Sopenharmony_ci				      void *pages,
21808c2ecf20Sopenharmony_ci				      bool (*notify)(struct virtqueue *vq),
21818c2ecf20Sopenharmony_ci				      void (*callback)(struct virtqueue *vq),
21828c2ecf20Sopenharmony_ci				      const char *name)
21838c2ecf20Sopenharmony_ci{
21848c2ecf20Sopenharmony_ci	struct vring vring;
21858c2ecf20Sopenharmony_ci
21868c2ecf20Sopenharmony_ci	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
21878c2ecf20Sopenharmony_ci		return NULL;
21888c2ecf20Sopenharmony_ci
21898c2ecf20Sopenharmony_ci	vring_init(&vring, num, pages, vring_align);
21908c2ecf20Sopenharmony_ci	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
21918c2ecf20Sopenharmony_ci				     notify, callback, name);
21928c2ecf20Sopenharmony_ci}
21938c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(vring_new_virtqueue);
21948c2ecf20Sopenharmony_ci
/*
 * Tear down a virtqueue: release the DMA ring memory (only if we
 * allocated it), the per-descriptor bookkeeping, unlink it from the
 * device's queue list, and free the virtqueue structure itself.
 */
void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Ring memory is only ours to free if vring_create_*() made it. */
	if (vq->we_own_ring) {
		if (vq->packed_ring) {
			/* Packed layout: three separate DMA allocations. */
			vring_free_queue(vq->vq.vdev,
					 vq->packed.ring_size_in_bytes,
					 vq->packed.vring.desc,
					 vq->packed.ring_dma_addr);

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.driver,
					 vq->packed.driver_event_dma_addr);

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.device,
					 vq->packed.device_event_dma_addr);

			kfree(vq->packed.desc_state);
			kfree(vq->packed.desc_extra);
		} else {
			/* Split layout: one contiguous DMA allocation. */
			vring_free_queue(vq->vq.vdev,
					 vq->split.queue_size_in_bytes,
					 vq->split.vring.desc,
					 vq->split.queue_dma_addr);
		}
	}
	/* Split desc_state is allocated even for caller-owned rings. */
	if (!vq->packed_ring)
		kfree(vq->split.desc_state);
	spin_lock(&vq->vq.vdev->vqs_list_lock);
	list_del(&_vq->list);
	spin_unlock(&vq->vq.vdev->vqs_list_lock);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
22338c2ecf20Sopenharmony_ci
22348c2ecf20Sopenharmony_ci/* Manipulates transport-specific feature bits. */
22358c2ecf20Sopenharmony_civoid vring_transport_features(struct virtio_device *vdev)
22368c2ecf20Sopenharmony_ci{
22378c2ecf20Sopenharmony_ci	unsigned int i;
22388c2ecf20Sopenharmony_ci
22398c2ecf20Sopenharmony_ci	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
22408c2ecf20Sopenharmony_ci		switch (i) {
22418c2ecf20Sopenharmony_ci		case VIRTIO_RING_F_INDIRECT_DESC:
22428c2ecf20Sopenharmony_ci			break;
22438c2ecf20Sopenharmony_ci		case VIRTIO_RING_F_EVENT_IDX:
22448c2ecf20Sopenharmony_ci			break;
22458c2ecf20Sopenharmony_ci		case VIRTIO_F_VERSION_1:
22468c2ecf20Sopenharmony_ci			break;
22478c2ecf20Sopenharmony_ci		case VIRTIO_F_ACCESS_PLATFORM:
22488c2ecf20Sopenharmony_ci			break;
22498c2ecf20Sopenharmony_ci		case VIRTIO_F_RING_PACKED:
22508c2ecf20Sopenharmony_ci			break;
22518c2ecf20Sopenharmony_ci		case VIRTIO_F_ORDER_PLATFORM:
22528c2ecf20Sopenharmony_ci			break;
22538c2ecf20Sopenharmony_ci		default:
22548c2ecf20Sopenharmony_ci			/* We don't understand this bit. */
22558c2ecf20Sopenharmony_ci			__virtio_clear_bit(vdev, i);
22568c2ecf20Sopenharmony_ci		}
22578c2ecf20Sopenharmony_ci	}
22588c2ecf20Sopenharmony_ci}
22598c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(vring_transport_features);
22608c2ecf20Sopenharmony_ci
22618c2ecf20Sopenharmony_ci/**
22628c2ecf20Sopenharmony_ci * virtqueue_get_vring_size - return the size of the virtqueue's vring
22638c2ecf20Sopenharmony_ci * @_vq: the struct virtqueue containing the vring of interest.
22648c2ecf20Sopenharmony_ci *
22658c2ecf20Sopenharmony_ci * Returns the size of the vring.  This is mainly used for boasting to
22668c2ecf20Sopenharmony_ci * userspace.  Unlike other operations, this need not be serialized.
22678c2ecf20Sopenharmony_ci */
22688c2ecf20Sopenharmony_ciunsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
22698c2ecf20Sopenharmony_ci{
22708c2ecf20Sopenharmony_ci
22718c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
22728c2ecf20Sopenharmony_ci
22738c2ecf20Sopenharmony_ci	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
22748c2ecf20Sopenharmony_ci}
22758c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
22768c2ecf20Sopenharmony_ci
22778c2ecf20Sopenharmony_cibool virtqueue_is_broken(struct virtqueue *_vq)
22788c2ecf20Sopenharmony_ci{
22798c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
22808c2ecf20Sopenharmony_ci
22818c2ecf20Sopenharmony_ci	return READ_ONCE(vq->broken);
22828c2ecf20Sopenharmony_ci}
22838c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_is_broken);
22848c2ecf20Sopenharmony_ci
22858c2ecf20Sopenharmony_ci/*
22868c2ecf20Sopenharmony_ci * This should prevent the device from being used, allowing drivers to
22878c2ecf20Sopenharmony_ci * recover.  You may need to grab appropriate locks to flush.
22888c2ecf20Sopenharmony_ci */
22898c2ecf20Sopenharmony_civoid virtio_break_device(struct virtio_device *dev)
22908c2ecf20Sopenharmony_ci{
22918c2ecf20Sopenharmony_ci	struct virtqueue *_vq;
22928c2ecf20Sopenharmony_ci
22938c2ecf20Sopenharmony_ci	spin_lock(&dev->vqs_list_lock);
22948c2ecf20Sopenharmony_ci	list_for_each_entry(_vq, &dev->vqs, list) {
22958c2ecf20Sopenharmony_ci		struct vring_virtqueue *vq = to_vvq(_vq);
22968c2ecf20Sopenharmony_ci
22978c2ecf20Sopenharmony_ci		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
22988c2ecf20Sopenharmony_ci		WRITE_ONCE(vq->broken, true);
22998c2ecf20Sopenharmony_ci	}
23008c2ecf20Sopenharmony_ci	spin_unlock(&dev->vqs_list_lock);
23018c2ecf20Sopenharmony_ci}
23028c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtio_break_device);
23038c2ecf20Sopenharmony_ci
23048c2ecf20Sopenharmony_cidma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
23058c2ecf20Sopenharmony_ci{
23068c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
23078c2ecf20Sopenharmony_ci
23088c2ecf20Sopenharmony_ci	BUG_ON(!vq->we_own_ring);
23098c2ecf20Sopenharmony_ci
23108c2ecf20Sopenharmony_ci	if (vq->packed_ring)
23118c2ecf20Sopenharmony_ci		return vq->packed.ring_dma_addr;
23128c2ecf20Sopenharmony_ci
23138c2ecf20Sopenharmony_ci	return vq->split.queue_dma_addr;
23148c2ecf20Sopenharmony_ci}
23158c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
23168c2ecf20Sopenharmony_ci
23178c2ecf20Sopenharmony_cidma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
23188c2ecf20Sopenharmony_ci{
23198c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
23208c2ecf20Sopenharmony_ci
23218c2ecf20Sopenharmony_ci	BUG_ON(!vq->we_own_ring);
23228c2ecf20Sopenharmony_ci
23238c2ecf20Sopenharmony_ci	if (vq->packed_ring)
23248c2ecf20Sopenharmony_ci		return vq->packed.driver_event_dma_addr;
23258c2ecf20Sopenharmony_ci
23268c2ecf20Sopenharmony_ci	return vq->split.queue_dma_addr +
23278c2ecf20Sopenharmony_ci		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
23288c2ecf20Sopenharmony_ci}
23298c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
23308c2ecf20Sopenharmony_ci
23318c2ecf20Sopenharmony_cidma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
23328c2ecf20Sopenharmony_ci{
23338c2ecf20Sopenharmony_ci	struct vring_virtqueue *vq = to_vvq(_vq);
23348c2ecf20Sopenharmony_ci
23358c2ecf20Sopenharmony_ci	BUG_ON(!vq->we_own_ring);
23368c2ecf20Sopenharmony_ci
23378c2ecf20Sopenharmony_ci	if (vq->packed_ring)
23388c2ecf20Sopenharmony_ci		return vq->packed.device_event_dma_addr;
23398c2ecf20Sopenharmony_ci
23408c2ecf20Sopenharmony_ci	return vq->split.queue_dma_addr +
23418c2ecf20Sopenharmony_ci		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
23428c2ecf20Sopenharmony_ci}
23438c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
23448c2ecf20Sopenharmony_ci
23458c2ecf20Sopenharmony_ci/* Only available for split ring */
23468c2ecf20Sopenharmony_ciconst struct vring *virtqueue_get_vring(struct virtqueue *vq)
23478c2ecf20Sopenharmony_ci{
23488c2ecf20Sopenharmony_ci	return &to_vvq(vq)->split.vring;
23498c2ecf20Sopenharmony_ci}
23508c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(virtqueue_get_vring);
23518c2ecf20Sopenharmony_ci
23528c2ecf20Sopenharmony_ciMODULE_LICENSE("GPL");
2353