// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/*
 * For DMA buffers the storage is sub-divided into so-called blocks. Each block
 * has its own memory buffer. The size of the block is the granularity at which
 * memory is exchanged between the hardware and the application. Increasing the
 * basic unit of data exchange from one sample to one block decreases the
 * management overhead that is associated with each sample. E.g. if we say the
 * management overhead for one exchange is x and the unit of exchange is one
 * sample the overhead will be x for each sample. Whereas when using a block
 * which contains n samples the overhead per sample is reduced to x/n. This
 * makes it possible to achieve much higher sample rates than can be sustained
 * with the one-sample approach.
 *
 * Blocks are exchanged between the DMA controller and the application by means
 * of two queues: the incoming queue and the outgoing queue. Blocks on the
 * incoming queue are waiting for the DMA controller to pick them up and fill
 * them with data. Blocks on the outgoing queue have been filled with data and
 * are waiting for the application to dequeue them and read the data.
 *
 * A block can be in one of the following states:
 *  * Owned by the application: In this state the application can read data
 *    from the block.
 *  * On the incoming list: Blocks on the incoming list are queued up to be
 *    processed by the DMA controller.
 *  * Owned by the DMA controller: The DMA controller is processing the block
 *    and filling it with data.
 *  * On the outgoing list: Blocks on the outgoing list have been successfully
 *    processed by the DMA controller and contain data. They can be dequeued by
 *    the application.
 *  * Dead: A block that is dead has been marked to be freed. It might still be
 *    owned by either the application or the DMA controller at the moment. But
 *    once processing completes, instead of being placed on either the incoming
 *    or the outgoing queue, the block will be freed.
 *
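 * In the code these states correspond to the enum iio_block_state values from
 * <linux/iio/buffer-dma.h> that are used throughout this file:
 * IIO_BLOCK_STATE_DEQUEUED (owned by the application), IIO_BLOCK_STATE_QUEUED
 * (on the incoming list), IIO_BLOCK_STATE_ACTIVE (owned by the DMA
 * controller), IIO_BLOCK_STATE_DONE (on the outgoing list) and
 * IIO_BLOCK_STATE_DEAD.
 *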
 * In addition, blocks are reference counted and the memory associated with
 * both the block structure as well as the storage memory for the block will be
 * freed when the last reference to the block is dropped. This means a block
 * must not be accessed without holding a reference.
 *
 * The iio_dma_buffer implementation provides a generic infrastructure for
 * managing the blocks.
 *
 * A driver for a specific piece of hardware that has DMA capabilities needs to
 * implement the submit() callback from the iio_dma_buffer_ops structure. This
 * callback is supposed to initiate the DMA transfer copying data from the
 * converter to the memory region of the block. Once the DMA transfer has been
 * completed the driver must call iio_dma_buffer_block_done() for the completed
 * block.
 *
 * Prior to this it must set the bytes_used field of the block to the actual
 * number of valid bytes in the buffer. Typically this will be equal to the
 * size of the block, but if the DMA hardware has certain alignment
 * requirements for the transfer length it might choose to use less than the
 * full size. In either case it is expected that bytes_used is a multiple of
 * the bytes per datum, i.e. the block must not contain partial samples.
 *
 * The driver must call iio_dma_buffer_block_done() for each block it has
 * received through its submit() callback, even if it does not actually perform
 * a DMA transfer for the block, e.g. because the buffer was disabled before
 * the block transfer was started. In this case it should set bytes_used to 0.
 *
 * In addition it is recommended that a driver implements the abort() callback.
 * It will be called when the buffer is disabled and can be used to cancel
 * pending transfers and stop active ones.
 *
 * The specific driver implementation should use the default callback
 * implementations provided by this module for the iio_buffer_access_funcs
 * struct. It may overload some callbacks with custom variants if the hardware
 * has special requirements that are not handled by the generic functions. If a
 * driver chooses to overload a callback it has to ensure that the generic
 * callback is called from within the custom callback.
 */
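
/*
 * As a minimal sketch of that contract, a hypothetical "foo" driver (all
 * foo_* names are invented for illustration; the types and helpers are the
 * real ones from this module) could wire up its iio_dma_buffer_ops like so:
 *
 *	static int foo_submit(struct iio_dma_buffer_queue *queue,
 *			      struct iio_dma_buffer_block *block)
 *	{
 *		return foo_hw_start_transfer(queue->dev, block->phys_addr,
 *					     block->size);
 *	}
 *
 *	static void foo_abort(struct iio_dma_buffer_queue *queue)
 *	{
 *		foo_hw_stop_transfers(queue->dev);
 *	}
 *
 *	static const struct iio_dma_buffer_ops foo_dma_buffer_ops = {
 *		.submit = foo_submit,
 *		.abort = foo_abort,
 *	};
 *
 * On completion the foo driver's interrupt handler would set
 * block->bytes_used and call iio_dma_buffer_block_done(block).
 */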

static void iio_buffer_block_release(struct kref *kref)
{
	struct iio_dma_buffer_block *block = container_of(kref,
		struct iio_dma_buffer_block, kref);

	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);

	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
					block->vaddr, block->phys_addr);

	iio_buffer_put(&block->queue->buffer);
	kfree(block);
}

static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
{
	kref_get(&block->kref);
}

static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release);
}

/*
 * dma_free_coherent() can sleep, hence we need to take some special care to be
 * able to drop a reference from an atomic context.
 */
static LIST_HEAD(iio_dma_buffer_dead_blocks);
static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);

static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
	struct iio_dma_buffer_block *block, *_block;
	LIST_HEAD(block_list);

	spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
	list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
	spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);

	list_for_each_entry_safe(block, _block, &block_list, head)
		iio_buffer_block_release(&block->kref);
}
static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);

static void iio_buffer_block_release_atomic(struct kref *kref)
{
	struct iio_dma_buffer_block *block;
	unsigned long flags;

	block = container_of(kref, struct iio_dma_buffer_block, kref);

	spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
	list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
	spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);

	schedule_work(&iio_dma_buffer_cleanup_work);
}

/*
 * Version of iio_buffer_block_put() that can be called from atomic context
 */
static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release_atomic);
}

static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
{
	return container_of(buf, struct iio_dma_buffer_queue, buffer);
}

static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
	struct iio_dma_buffer_queue *queue, size_t size)
{
	struct iio_dma_buffer_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;

	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
		&block->phys_addr, GFP_KERNEL);
	if (!block->vaddr) {
		kfree(block);
		return NULL;
	}

	block->size = size;
	block->state = IIO_BLOCK_STATE_DEQUEUED;
	block->queue = queue;
	INIT_LIST_HEAD(&block->head);
	kref_init(&block->kref);

	iio_buffer_get(&queue->buffer);

	return block;
}

static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;

	/*
	 * If the block is marked as dead the buffer is being torn down, so
	 * don't add it back to the outgoing queue; the caller dropping its
	 * reference will free it.
	 */
	if (block->state != IIO_BLOCK_STATE_DEAD) {
		block->state = IIO_BLOCK_STATE_DONE;
		list_add_tail(&block->head, &queue->outgoing);
	}
}

/**
 * iio_dma_buffer_block_done() - Indicate that a block has been completed
 * @block: The completed block
 *
 * Should be called when the DMA controller has finished handling the block to
 * pass back ownership of the block to the queue.
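 *
 * As a hedged sketch, a hypothetical driver completion handler (the foo_*
 * names are invented for illustration) might look like:
 *
 *	static irqreturn_t foo_dma_irq(int irq, void *data)
 *	{
 *		struct iio_dma_buffer_block *block = data;
 *
 *		block->bytes_used = foo_hw_bytes_transferred(block->queue->dev);
 *		iio_dma_buffer_block_done(block);
 *
 *		return IRQ_HANDLED;
 *	}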
 */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	_iio_dma_buffer_block_done(block);
	spin_unlock_irqrestore(&queue->list_lock, flags);

	iio_buffer_block_put_atomic(block);
	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);

/**
 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
 *   aborted
 * @queue: Queue for which to complete blocks.
 * @list: List of aborted blocks. All blocks in this list must be from @queue.
 *
 * Typically called from the abort() callback after the DMA controller has been
 * stopped. This will set bytes_used to 0 for each block in the list and then
 * hand the blocks back to the queue.
 */
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list)
{
	struct iio_dma_buffer_block *block, *_block;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	list_for_each_entry_safe(block, _block, list, head) {
		list_del(&block->head);
		block->bytes_used = 0;
		_iio_dma_buffer_block_done(block);
		iio_buffer_block_put_atomic(block);
	}
	spin_unlock_irqrestore(&queue->list_lock, flags);

	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);

static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
	/*
	 * If the core owns the block it can be re-used. This should be the
	 * default case when enabling the buffer, unless the DMA controller does
	 * not support abort and has not given back the block yet.
	 */
	switch (block->state) {
	case IIO_BLOCK_STATE_DEQUEUED:
	case IIO_BLOCK_STATE_QUEUED:
	case IIO_BLOCK_STATE_DONE:
		return true;
	default:
		return false;
	}
}

/**
 * iio_dma_buffer_request_update() - DMA buffer request_update callback
 * @buffer: The buffer for which to request an update
 *
 * Should be used as the request_update() callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_request_update(struct iio_buffer *buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	bool try_reuse = false;
	size_t size;
	int ret = 0;
	int i;

	/*
	 * Split the buffer into two even parts. This is used as a double
	 * buffering scheme with usually one block at a time being used by the
	 * DMA and the other one by the application.
	 */
	size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
		queue->buffer.length, 2);
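	/*
	 * For example, bytes_per_datum == 4 and a length of 2048 samples
	 * yields two blocks of 4096 bytes each.
	 */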

	mutex_lock(&queue->lock);

	/* Allocations are page aligned */
	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
		try_reuse = true;

	queue->fileio.block_size = size;
	queue->fileio.active_block = NULL;

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		block = queue->fileio.blocks[i];

		/* If we can't re-use it, free it */
		if (block && (!iio_dma_block_reusable(block) || !try_reuse))
			block->state = IIO_BLOCK_STATE_DEAD;
	}

	/*
	 * At this point all blocks are either owned by the core or marked as
	 * dead. This means we can reset the lists without having to fear
	 * corruption.
	 */
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (queue->fileio.blocks[i]) {
			block = queue->fileio.blocks[i];
			if (block->state == IIO_BLOCK_STATE_DEAD) {
				/* Could not reuse it */
				iio_buffer_block_put(block);
				block = NULL;
			} else {
				block->size = size;
			}
		} else {
			block = NULL;
		}

		if (!block) {
			block = iio_dma_buffer_alloc_block(queue, size);
			if (!block) {
				ret = -ENOMEM;
				goto out_unlock;
			}
			queue->fileio.blocks[i] = block;
		}

		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);

static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	int ret;

	/*
	 * If the hardware has already been removed we put the block into
	 * limbo. It will neither be on the incoming nor outgoing list, nor will
	 * it ever complete. It will just wait to be freed eventually.
	 */
	if (!queue->ops)
		return;

	block->state = IIO_BLOCK_STATE_ACTIVE;
	iio_buffer_block_get(block);
	ret = queue->ops->submit(queue, block);
	if (ret) {
		/*
		 * This is a bit of a problem and there is not much we can do
		 * other than wait for the buffer to be disabled and re-enabled
		 * and try again. But it should not really happen unless we run
		 * out of memory or something similar.
		 *
		 * TODO: Implement support in the IIO core to allow buffers to
		 * notify consumers that something went wrong and the buffer
		 * should be disabled.
		 */
		iio_buffer_block_put(block);
	}
}

/**
 * iio_dma_buffer_enable() - Enable DMA buffer
 * @buffer: IIO buffer to enable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to starts
 * sampling. Typically should be used as the iio_buffer_access_funcs enable
 * callback.
 *
 * This will submit the queued blocks to the DMA controller and start the
 * transfers.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block, *_block;

	mutex_lock(&queue->lock);
	queue->active = true;
	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
		list_del(&block->head);
		iio_dma_buffer_submit_block(queue, block);
	}
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);

/**
 * iio_dma_buffer_disable() - Disable DMA buffer
 * @buffer: IIO DMA buffer to disable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to stops
 * sampling. Typically should be used as the iio_buffer_access_funcs disable
 * callback.
 */
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

	mutex_lock(&queue->lock);
	queue->active = false;

	if (queue->ops && queue->ops->abort)
		queue->ops->abort(queue);
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);

static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	if (block->state == IIO_BLOCK_STATE_DEAD) {
		iio_buffer_block_put(block);
	} else if (queue->active) {
		iio_dma_buffer_submit_block(queue, block);
	} else {
		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}
}

static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
	struct iio_dma_buffer_queue *queue)
{
	struct iio_dma_buffer_block *block;

	spin_lock_irq(&queue->list_lock);
	block = list_first_entry_or_null(&queue->outgoing,
		struct iio_dma_buffer_block, head);
	if (block != NULL) {
		list_del(&block->head);
		block->state = IIO_BLOCK_STATE_DEQUEUED;
	}
	spin_unlock_irq(&queue->list_lock);

	return block;
}

/**
 * iio_dma_buffer_read() - DMA buffer read callback
 * @buffer: Buffer to read from
 * @n: Number of bytes to read
 * @user_buffer: Userspace buffer to copy the data to
 *
 * Should be used as the read callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	int ret;

	if (n < buffer->bytes_per_datum)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (!queue->fileio.active_block) {
		block = iio_dma_buffer_dequeue(queue);
		if (block == NULL) {
			ret = 0;
			goto out_unlock;
		}
		queue->fileio.pos = 0;
		queue->fileio.active_block = block;
	} else {
		block = queue->fileio.active_block;
	}

	n = rounddown(n, buffer->bytes_per_datum);
	if (n > block->bytes_used - queue->fileio.pos)
		n = block->bytes_used - queue->fileio.pos;

	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	queue->fileio.pos += n;

	if (queue->fileio.pos == block->bytes_used) {
		queue->fileio.active_block = NULL;
		iio_dma_buffer_enqueue(queue, block);
	}

	ret = n;

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_read);

/**
 * iio_dma_buffer_data_available() - DMA buffer data_available callback
 * @buf: Buffer to check for data availability
 *
 * Should be used as the data_available callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
	struct iio_dma_buffer_block *block;
	size_t data_available = 0;

	/*
	 * For counting the available bytes we'll use the size of the block not
	 * the number of actual bytes available in the block. Otherwise it is
	 * possible that we end up with a value that is lower than the watermark
	 * but won't increase since all blocks are in use.
	 */

	mutex_lock(&queue->lock);
	if (queue->fileio.active_block)
		data_available += queue->fileio.active_block->size;

	spin_lock_irq(&queue->list_lock);
	list_for_each_entry(block, &queue->outgoing, head)
		data_available += block->size;
	spin_unlock_irq(&queue->list_lock);
	mutex_unlock(&queue->lock);

	return data_available;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);

/**
 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
 * @buffer: Buffer to set the bytes-per-datum for
 * @bpd: The new bytes-per-datum value
 *
 * Should be used as the set_bytes_per_datum callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
{
	buffer->bytes_per_datum = bpd;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);

/**
 * iio_dma_buffer_set_length() - DMA buffer set_length callback
 * @buffer: Buffer to set the length for
 * @length: The new buffer length
 *
 * Should be used as the set_length callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
	/* Avoid an invalid state */
	if (length < 2)
		length = 2;
	buffer->length = length;
	buffer->watermark = length / 2;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);
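
/*
 * Taken together, a driver typically plugs these defaults into its
 * iio_buffer_access_funcs. A sketch modeled on the in-tree dmaengine buffer
 * (the foo_* release callback is a hypothetical driver-specific wrapper that
 * ends up calling iio_dma_buffer_release()):
 *
 *	static const struct iio_buffer_access_funcs foo_buffer_access_funcs = {
 *		.read = iio_dma_buffer_read,
 *		.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 *		.set_length = iio_dma_buffer_set_length,
 *		.request_update = iio_dma_buffer_request_update,
 *		.enable = iio_dma_buffer_enable,
 *		.disable = iio_dma_buffer_disable,
 *		.data_available = iio_dma_buffer_data_available,
 *		.release = foo_buffer_release,
 *		.modes = INDIO_BUFFER_HARDWARE,
 *		.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
 *	};
 */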

/**
 * iio_dma_buffer_init() - Initialize DMA buffer queue
 * @queue: Buffer to initialize
 * @dev: DMA device
 * @ops: DMA buffer queue callback operations
 *
 * The DMA device will be used by the queue to do DMA memory allocations. So it
 * should refer to the device that will perform the DMA to ensure that
 * allocations are done from a memory region that can be accessed by the device.
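 *
 * For example, a hypothetical driver setup path (reusing the invented foo_*
 * names from the sketch at the top of this file) could do:
 *
 *	ret = iio_dma_buffer_init(&foo->queue, dma_dev, &foo_dma_buffer_ops);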
 */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dev, const struct iio_dma_buffer_ops *ops)
{
	iio_buffer_init(&queue->buffer);
	queue->buffer.length = PAGE_SIZE;
	queue->buffer.watermark = queue->buffer.length / 2;
	queue->dev = dev;
	queue->ops = ops;

	INIT_LIST_HEAD(&queue->incoming);
	INIT_LIST_HEAD(&queue->outgoing);

	mutex_init(&queue->lock);
	spin_lock_init(&queue->list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_init);

/**
 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
 * @queue: Buffer to cleanup
 *
 * After this function has completed it is safe to free any resources that are
 * associated with the buffer and are accessed inside the callback operations.
 */
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
	unsigned int i;

	mutex_lock(&queue->lock);

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
	}
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		iio_buffer_block_put(queue->fileio.blocks[i]);
		queue->fileio.blocks[i] = NULL;
	}
	queue->fileio.active_block = NULL;
	queue->ops = NULL;

	mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);

/**
 * iio_dma_buffer_release() - Release final buffer resources
 * @queue: Buffer to release
 *
 * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
 * called in the buffer's release callback implementation right before freeing
 * the memory associated with the buffer.
 */
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
	mutex_destroy(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_release);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL v2");