// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/*
 * For DMA buffers the storage is sub-divided into so-called blocks. Each block
 * has its own memory buffer. The size of the block is the granularity at which
 * memory is exchanged between the hardware and the application. Increasing the
 * basic unit of data exchange from one sample to one block decreases the
 * management overhead that is associated with each sample. E.g. if we say the
 * management overhead for one exchange is x and the unit of exchange is one
 * sample the overhead will be x for each sample. Whereas when using a block
 * which contains n samples the overhead per sample is reduced to x/n. This
 * makes it possible to achieve much higher sample rates than what can be
 * sustained with the one-sample approach.
 *
 * Blocks are exchanged between the DMA controller and the application by means
 * of two queues: the incoming queue and the outgoing queue. Blocks on the
 * incoming queue are waiting for the DMA controller to pick them up and fill
 * them with data. Blocks on the outgoing queue have been filled with data and
 * are waiting for the application to dequeue them and read the data.
 *
 * A block can be in one of the following states:
 *  * Owned by the application. In this state the application can read data
 *    from the block.
 *  * On the incoming list: Blocks on the incoming list are queued up to be
 *    processed by the DMA controller.
 *  * Owned by the DMA controller: The DMA controller is processing the block
 *    and filling it with data.
 *  * On the outgoing list: Blocks on the outgoing list have been successfully
 *    processed by the DMA controller and contain data. They can be dequeued by
 *    the application.
 *  * Dead: A block that is dead has been marked to be freed. It might still
 *    be owned by either the application or the DMA controller at the moment,
 *    but once they are done processing it the block will be freed instead of
 *    going back to either the incoming or outgoing queue.
 *
 * In addition to this, blocks are reference counted and the memory associated
 * with both the block structure as well as the storage memory for the block
 * will be freed when the last reference to the block is dropped. This means a
 * block must not be accessed without holding a reference.
 *
 * The iio_dma_buffer implementation provides a generic infrastructure for
 * managing the blocks.
 *
 * A driver for a specific piece of hardware that has DMA capabilities needs to
 * implement the submit() callback from the iio_dma_buffer_ops structure. This
 * callback is supposed to initiate the DMA transfer copying data from the
 * converter to the memory region of the block. Once the DMA transfer has been
 * completed the driver must call iio_dma_buffer_block_done() for the completed
 * block.
 *
 * Prior to this it must set the bytes_used field of the block to contain
 * the actual number of bytes in the buffer. Typically this will be equal to the
 * size of the block, but if the DMA hardware has certain alignment requirements
 * for the transfer length it might choose to use less than the full size. In
 * either case it is expected that bytes_used is a multiple of the bytes per
 * datum, i.e. the block must not contain partial samples.
 *
 * The driver must call iio_dma_buffer_block_done() for each block it has
 * received through its submit() callback, even if it does not actually
 * perform a DMA transfer for the block, e.g. because the buffer was disabled
 * before the block transfer was started. In this case it should set bytes_used
 * to 0.
 *
 * In addition it is recommended that a driver implements the abort() callback.
 * It will be called when the buffer is disabled and can be used to cancel
 * pending transfers and stop active ones.
 *
 * The specific driver implementation should use the default callback
 * implementations provided by this module for the iio_buffer_access_funcs
 * struct. It may overload some callbacks with custom variants if the hardware
 * has special requirements that are not handled by the generic functions. If a
 * driver chooses to overload a callback it has to ensure that the generic
 * callback is called from within the custom callback. A sketch of such a
 * wiring follows below.
 */
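
/*
 * Illustrative sketch only (not part of this module): a typical hardware
 * driver wires the default callbacks below into its iio_buffer_access_funcs.
 * The my_driver_* names are assumptions; a real driver usually also supplies
 * its own release callback on top of these defaults:
 *
 *	static const struct iio_buffer_access_funcs my_driver_buffer_ops = {
 *		.read = iio_dma_buffer_read,
 *		.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 *		.set_length = iio_dma_buffer_set_length,
 *		.request_update = iio_dma_buffer_request_update,
 *		.enable = iio_dma_buffer_enable,
 *		.disable = iio_dma_buffer_disable,
 *		.data_available = iio_dma_buffer_data_available,
 *		.release = my_driver_buffer_release,
 *
 *		.modes = INDIO_BUFFER_HARDWARE,
 *	};
 */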

static void iio_buffer_block_release(struct kref *kref)
{
	struct iio_dma_buffer_block *block = container_of(kref,
		struct iio_dma_buffer_block, kref);

	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);

	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
					block->vaddr, block->phys_addr);

	iio_buffer_put(&block->queue->buffer);
	kfree(block);
}

static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
{
	kref_get(&block->kref);
}

static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release);
}

/*
 * dma_free_coherent can sleep, hence we need to take some special care to be
 * able to drop a reference from an atomic context.
 */
static LIST_HEAD(iio_dma_buffer_dead_blocks);
static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);

static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
	struct iio_dma_buffer_block *block, *_block;
	LIST_HEAD(block_list);

	spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
	list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
	spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);

	list_for_each_entry_safe(block, _block, &block_list, head)
		iio_buffer_block_release(&block->kref);
}
static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);

static void iio_buffer_block_release_atomic(struct kref *kref)
{
	struct iio_dma_buffer_block *block;
	unsigned long flags;

	block = container_of(kref, struct iio_dma_buffer_block, kref);

	spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
	list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
	spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);

	schedule_work(&iio_dma_buffer_cleanup_work);
}

/*
 * Version of iio_buffer_block_put() that can be called from atomic context
 */
static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release_atomic);
}

static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
{
	return container_of(buf, struct iio_dma_buffer_queue, buffer);
}

static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
	struct iio_dma_buffer_queue *queue, size_t size)
{
	struct iio_dma_buffer_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;

	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
		&block->phys_addr, GFP_KERNEL);
	if (!block->vaddr) {
		kfree(block);
		return NULL;
	}

	block->size = size;
	block->state = IIO_BLOCK_STATE_DEQUEUED;
	block->queue = queue;
	INIT_LIST_HEAD(&block->head);
	kref_init(&block->kref);

	iio_buffer_get(&queue->buffer);

	return block;
}
static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;

	/*
	 * If the block is dead the buffer has already been given up by the
	 * application; skip the outgoing list and let the caller just drop
	 * the reference.
	 */
	if (block->state != IIO_BLOCK_STATE_DEAD) {
		block->state = IIO_BLOCK_STATE_DONE;
		list_add_tail(&block->head, &queue->outgoing);
	}
}

/**
 * iio_dma_buffer_block_done() - Indicate that a block has been completed
 * @block: The completed block
 *
 * Should be called when the DMA controller has finished handling the block to
 * pass back ownership of the block to the queue.
 */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	_iio_dma_buffer_block_done(block);
	spin_unlock_irqrestore(&queue->list_lock, flags);

	iio_buffer_block_put_atomic(block);
	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
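
/*
 * Illustrative sketch only: how a dmaengine based driver might implement the
 * submit() callback and complete blocks. my_driver_* and the dq->chan field
 * are hypothetical driver internals, not part of this module's API:
 *
 *	static void my_driver_block_done(void *data)
 *	{
 *		struct iio_dma_buffer_block *block = data;
 *
 *		// Full transfer assumed here; a partial transfer would set a
 *		// smaller, sample-aligned value instead.
 *		block->bytes_used = block->size;
 *		iio_dma_buffer_block_done(block);
 *	}
 *
 *	static int my_driver_submit(struct iio_dma_buffer_queue *queue,
 *		struct iio_dma_buffer_block *block)
 *	{
 *		struct my_driver_queue *dq = to_my_driver_queue(queue);
 *		struct dma_async_tx_descriptor *desc;
 *
 *		desc = dmaengine_prep_slave_single(dq->chan, block->phys_addr,
 *			block->size, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *		if (!desc)
 *			return -ENOMEM;
 *
 *		desc->callback = my_driver_block_done;
 *		desc->callback_param = block;
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(dq->chan);
 *
 *		return 0;
 *	}
 */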

/**
 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
 *   aborted
 * @queue: Queue for which to complete blocks.
 * @list: List of aborted blocks. All blocks in this list must be from @queue.
 *
 * Typically called from the abort() callback after the DMA controller has been
 * stopped. This will set bytes_used to 0 for each block in the list and then
 * hand the blocks back to the queue.
 */
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list)
{
	struct iio_dma_buffer_block *block, *_block;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	list_for_each_entry_safe(block, _block, list, head) {
		list_del(&block->head);
		block->bytes_used = 0;
		_iio_dma_buffer_block_done(block);
		iio_buffer_block_put_atomic(block);
	}
	spin_unlock_irqrestore(&queue->list_lock, flags);

	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
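
/*
 * Illustrative sketch only: a matching abort() callback, assuming the driver
 * keeps the blocks it has submitted on its own active list (dq->active and
 * to_my_driver_queue() are hypothetical):
 *
 *	static void my_driver_abort(struct iio_dma_buffer_queue *queue)
 *	{
 *		struct my_driver_queue *dq = to_my_driver_queue(queue);
 *
 *		dmaengine_terminate_sync(dq->chan);
 *		iio_dma_buffer_block_list_abort(queue, &dq->active);
 *	}
 */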

static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
	/*
	 * If the core owns the block it can be re-used. This should be the
	 * default case when enabling the buffer, unless the DMA controller does
	 * not support abort and has not given back the block yet.
	 */
	switch (block->state) {
	case IIO_BLOCK_STATE_DEQUEUED:
	case IIO_BLOCK_STATE_QUEUED:
	case IIO_BLOCK_STATE_DONE:
		return true;
	default:
		return false;
	}
}

/**
 * iio_dma_buffer_request_update() - DMA buffer request_update callback
 * @buffer: The buffer for which to request an update
 *
 * Should be used as the request_update() callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_request_update(struct iio_buffer *buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	bool try_reuse = false;
	size_t size;
	int ret = 0;
	int i;

	/*
	 * Split the buffer into two even parts. This is used as a double
	 * buffering scheme with usually one block at a time being used by the
	 * DMA and the other one by the application.
	 */
	size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
		queue->buffer.length, 2);
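	/*
	 * Example (hypothetical values): bytes_per_datum == 4 and a length of
	 * 1024 samples give DIV_ROUND_UP(4096, 2) == 2048 bytes per block.
	 */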

	mutex_lock(&queue->lock);

	/* Allocations are page aligned */
	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
		try_reuse = true;

	queue->fileio.block_size = size;
	queue->fileio.active_block = NULL;

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		block = queue->fileio.blocks[i];

		/* If we can't re-use it free it */
		if (block && (!iio_dma_block_reusable(block) || !try_reuse))
			block->state = IIO_BLOCK_STATE_DEAD;
	}

	/*
	 * At this point all blocks are either owned by the core or marked as
	 * dead. This means we can reset the lists without having to fear
	 * corruption.
	 */
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (queue->fileio.blocks[i]) {
			block = queue->fileio.blocks[i];
			if (block->state == IIO_BLOCK_STATE_DEAD) {
				/* Could not reuse it */
				iio_buffer_block_put(block);
				block = NULL;
			} else {
				block->size = size;
			}
		} else {
			block = NULL;
		}

		if (!block) {
			block = iio_dma_buffer_alloc_block(queue, size);
			if (!block) {
				ret = -ENOMEM;
				goto out_unlock;
			}
			queue->fileio.blocks[i] = block;
		}

		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);

static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	int ret;

	/*
	 * If the hardware has already been removed we put the block into
	 * limbo. It will neither be on the incoming nor outgoing list, nor will
	 * it ever complete. It will just wait to be freed eventually.
	 */
	if (!queue->ops)
		return;

	block->state = IIO_BLOCK_STATE_ACTIVE;
	iio_buffer_block_get(block);
	ret = queue->ops->submit(queue, block);
	if (ret) {
		/*
		 * This is a bit of a problem and there is not much we can do
		 * other than wait for the buffer to be disabled and re-enabled
		 * and try again. But it should not really happen unless we run
		 * out of memory or something similar.
		 *
		 * TODO: Implement support in the IIO core to allow buffers to
		 * notify consumers that something went wrong and the buffer
		 * should be disabled.
		 */
		iio_buffer_block_put(block);
	}
}

/**
 * iio_dma_buffer_enable() - Enable DMA buffer
 * @buffer: IIO buffer to enable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to starts
 * sampling. Typically should be used as the iio_buffer_access_funcs enable
 * callback.
 *
 * This will submit the queued blocks and start the DMA transfers.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block, *_block;

	mutex_lock(&queue->lock);
	queue->active = true;
	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
		list_del(&block->head);
		iio_dma_buffer_submit_block(queue, block);
	}
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);

/**
 * iio_dma_buffer_disable() - Disable DMA buffer
 * @buffer: IIO DMA buffer to disable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to stops
 * sampling. Typically should be used as the iio_buffer_access_funcs disable
 * callback.
 */
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

	mutex_lock(&queue->lock);
	queue->active = false;

	if (queue->ops && queue->ops->abort)
		queue->ops->abort(queue);
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);

static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	if (block->state == IIO_BLOCK_STATE_DEAD) {
		iio_buffer_block_put(block);
	} else if (queue->active) {
		iio_dma_buffer_submit_block(queue, block);
	} else {
		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}
}

static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
	struct iio_dma_buffer_queue *queue)
{
	struct iio_dma_buffer_block *block;

	spin_lock_irq(&queue->list_lock);
	block = list_first_entry_or_null(&queue->outgoing,
		struct iio_dma_buffer_block, head);
	if (block != NULL) {
		list_del(&block->head);
		block->state = IIO_BLOCK_STATE_DEQUEUED;
	}
	spin_unlock_irq(&queue->list_lock);

	return block;
}

/**
 * iio_dma_buffer_read() - DMA buffer read callback
 * @buffer: Buffer to read from
 * @n: Number of bytes to read
 * @user_buffer: Userspace buffer to copy the data to
 *
 * Should be used as the read callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	int ret;

	if (n < buffer->bytes_per_datum)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (!queue->fileio.active_block) {
		block = iio_dma_buffer_dequeue(queue);
		if (block == NULL) {
			ret = 0;
			goto out_unlock;
		}
		queue->fileio.pos = 0;
		queue->fileio.active_block = block;
	} else {
		block = queue->fileio.active_block;
	}

	n = rounddown(n, buffer->bytes_per_datum);
	if (n > block->bytes_used - queue->fileio.pos)
		n = block->bytes_used - queue->fileio.pos;

	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	queue->fileio.pos += n;

	if (queue->fileio.pos == block->bytes_used) {
		queue->fileio.active_block = NULL;
		iio_dma_buffer_enqueue(queue, block);
	}

	ret = n;

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_read);

/**
 * iio_dma_buffer_data_available() - DMA buffer data_available callback
 * @buf: Buffer to check for data availability
 *
 * Should be used as the data_available callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
	struct iio_dma_buffer_block *block;
	size_t data_available = 0;

	/*
	 * For counting the available bytes we'll use the size of the block not
	 * the number of actual bytes available in the block. Otherwise it is
	 * possible that we end up with a value that is lower than the watermark
	 * but won't increase since all blocks are in use.
	 */

	mutex_lock(&queue->lock);
	if (queue->fileio.active_block)
		data_available += queue->fileio.active_block->size;

	spin_lock_irq(&queue->list_lock);
	list_for_each_entry(block, &queue->outgoing, head)
		data_available += block->size;
	spin_unlock_irq(&queue->list_lock);
	mutex_unlock(&queue->lock);

	return data_available;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);

/**
 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
 * @buffer: Buffer to set the bytes-per-datum for
 * @bpd: The new bytes-per-datum value
 *
 * Should be used as the set_bytes_per_datum callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
{
	buffer->bytes_per_datum = bpd;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);

/**
 * iio_dma_buffer_set_length() - DMA buffer set_length callback
 * @buffer: Buffer to set the length for
 * @length: The new buffer length
 *
 * Should be used as the set_length callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
	/* Avoid an invalid state */
	if (length < 2)
		length = 2;
	buffer->length = length;
	buffer->watermark = length / 2;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);

/**
 * iio_dma_buffer_init() - Initialize DMA buffer queue
 * @queue: Buffer to initialize
 * @dev: DMA device
 * @ops: DMA buffer queue callback operations
 *
 * The DMA device will be used by the queue to do DMA memory allocations. So it
 * should refer to the device that will perform the DMA to ensure that
 * allocations are done from a memory region that can be accessed by the device.
 */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dev, const struct iio_dma_buffer_ops *ops)
{
	iio_buffer_init(&queue->buffer);
	queue->buffer.length = PAGE_SIZE;
	queue->buffer.watermark = queue->buffer.length / 2;
	queue->dev = dev;
	queue->ops = ops;

	INIT_LIST_HEAD(&queue->incoming);
	INIT_LIST_HEAD(&queue->outgoing);

	mutex_init(&queue->lock);
	spin_lock_init(&queue->list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_init);
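
/*
 * Illustrative sketch only: probe-time setup in a driver, embedding the queue
 * in a private buffer structure (my_driver_* names are assumptions):
 *
 *	static const struct iio_dma_buffer_ops my_driver_dma_buffer_ops = {
 *		.submit = my_driver_submit,
 *		.abort = my_driver_abort,
 *	};
 *
 *	iio_dma_buffer_init(&my_buf->queue, dma_dev, &my_driver_dma_buffer_ops);
 *	my_buf->queue.buffer.access = &my_driver_buffer_ops;
 */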

/**
 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
 * @queue: Buffer to cleanup
 *
 * After this function has completed it is safe to free any resources that are
 * associated with the buffer and are accessed inside the callback operations.
 */
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
	unsigned int i;

	mutex_lock(&queue->lock);

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
	}
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		iio_buffer_block_put(queue->fileio.blocks[i]);
		queue->fileio.blocks[i] = NULL;
	}
	queue->fileio.active_block = NULL;
	queue->ops = NULL;

	mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);

/**
 * iio_dma_buffer_release() - Release final buffer resources
 * @queue: Buffer to release
 *
 * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
 * called in the buffer's release callback implementation right before freeing
 * the memory associated with the buffer.
 */
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
	mutex_destroy(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_release);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL v2");