Lines Matching refs:block
21 * For DMA buffers the storage is sub-divided into so-called blocks. Each block
22 * has its own memory buffer. The size of the block is the granularity at which
24 * basic unit of data exchange from one sample to one block decreases the
27 * sample, the overhead will be x for each sample. Whereas when using a block
38 * A block can be in one of the following states:
40 * the block.
43 * * Owned by the DMA controller: The DMA controller is processing the block
48 * * Dead: A block that is dead has been marked to be freed. It might still
51 * incoming or outgoing queue, the block will be freed.
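
For reference, the states named above correspond to the IIO_BLOCK_STATE_* constants used throughout the code below; a sketch of the state enum, assuming the declaration in include/linux/iio/buffer-dma.h and annotated with the ownership described above, could look like:

enum iio_block_state {
    IIO_BLOCK_STATE_DEQUEUED,  /* owned by the application/core */
    IIO_BLOCK_STATE_QUEUED,    /* on the incoming queue, waiting for the DMA controller */
    IIO_BLOCK_STATE_ACTIVE,    /* owned by the DMA controller, transfer in flight */
    IIO_BLOCK_STATE_DONE,      /* on the outgoing queue, ready to be dequeued */
    IIO_BLOCK_STATE_DEAD,      /* marked to be freed once all owners have let go */
};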
54 * with both the block structure and the storage memory for the block
55 * will be freed when the last reference to the block is dropped. This means a
56 * block must not be accessed without holding a reference.
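
As an illustration of that rule, a minimal sketch of a compliant access pattern (the function name is hypothetical; the kref wrappers are the static helpers shown further down, so the pattern only applies within this file):

/* Hypothetical example: access the block contents only with a reference held. */
static void example_access_block(struct iio_dma_buffer_block *block)
{
    iio_buffer_block_get(block);           /* take a reference before touching the block */
    memset(block->vaddr, 0, block->size);  /* vaddr/size are only valid while a reference is held */
    iio_buffer_block_put(block);           /* drop it; may free the block if it was the last reference */
}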
64 * converter to the memory region of the block. Once the DMA transfer has been
66 * block.
68 * Prior to this it must set the bytes_used field of the block to contain
70 * size of the block, but if the DMA hardware has certain alignment requirements
73 * datum, i.e. the block must not contain partial samples.
75 * The driver must call iio_dma_buffer_block_done() for each block it has
77 * perform a DMA transfer for the block, e.g. because the buffer was disabled
78 * before the block transfer was started. In this case it should set bytes_used
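
A hedged sketch of this contract from the driver side (the my_drv_ names are hypothetical; block->bytes_used and iio_dma_buffer_block_done() are the parts of this API being exercised):

/* Hypothetical DMA completion callback of a converter driver. */
static void my_drv_dma_complete(void *data)
{
    struct iio_dma_buffer_block *block = data;

    /*
     * Report how many bytes were actually transferred; this must be a
     * multiple of the bytes per datum, and 0 if the transfer never ran.
     */
    block->bytes_used = block->size;

    /* Hand ownership of the block back to the core. */
    iio_dma_buffer_block_done(block);
}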
95 struct iio_dma_buffer_block *block = container_of(kref,
98 WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);
100 dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
101 block->vaddr, block->phys_addr);
103 iio_buffer_put(&block->queue->buffer);
104 kfree(block);
107 static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
109 kref_get(&block->kref);
112 static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
114 kref_put(&block->kref, iio_buffer_block_release);
126 struct iio_dma_buffer_block *block, *_block;
133 list_for_each_entry_safe(block, _block, &block_list, head)
134 iio_buffer_block_release(&block->kref);
140 struct iio_dma_buffer_block *block;
143 block = container_of(kref, struct iio_dma_buffer_block, kref);
146 list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
155 static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
157 kref_put(&block->kref, iio_buffer_block_release_atomic);
168 struct iio_dma_buffer_block *block;
170 block = kzalloc(sizeof(*block), GFP_KERNEL);
171 if (!block)
174 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
175 &block->phys_addr, GFP_KERNEL);
176 if (!block->vaddr) {
177 kfree(block);
181 block->size = size;
182 block->state = IIO_BLOCK_STATE_DEQUEUED;
183 block->queue = queue;
184 INIT_LIST_HEAD(&block->head);
185 kref_init(&block->kref);
189 return block;
192 static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
194 struct iio_dma_buffer_queue *queue = block->queue;
200 if (block->state != IIO_BLOCK_STATE_DEAD) {
201 block->state = IIO_BLOCK_STATE_DONE;
202 list_add_tail(&block->head, &queue->outgoing);
207 * iio_dma_buffer_block_done() - Indicate that a block has been completed
208 * @block: The completed block
210 * Should be called when the DMA controller has finished handling the block to
211 * pass back ownership of the block to the queue.
213 void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
215 struct iio_dma_buffer_queue *queue = block->queue;
219 _iio_dma_buffer_block_done(block);
222 iio_buffer_block_put_atomic(block);
228 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
234 * stopped. This will set bytes_used to 0 for each block in the list and then
240 struct iio_dma_buffer_block *block, *_block;
244 list_for_each_entry_safe(block, _block, list, head) {
245 list_del(&block->head);
246 block->bytes_used = 0;
247 _iio_dma_buffer_block_done(block);
248 iio_buffer_block_put_atomic(block);
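
For context, a sketch of a driver abort() callback that would use this helper (the my_drv_ state, its list of in-flight blocks and the dmaengine channel are assumptions modelled on typical users of this API, not taken from this file):

/* Hypothetical abort() callback: stop the hardware, then hand back all blocks. */
static void my_drv_abort(struct iio_dma_buffer_queue *queue)
{
    struct my_drv_state *st = my_drv_state_from_queue(queue); /* hypothetical accessor */

    dmaengine_terminate_sync(st->chan);                  /* stop pending and active transfers */
    iio_dma_buffer_block_list_abort(queue, &st->active); /* sets bytes_used to 0 and returns the blocks */
}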
256 static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
259 * If the core owns the block, it can be re-used. This should be the
261 * not support abort and has not given back the block yet.
263 switch (block->state) {
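
The individual case labels are not part of this match listing; assuming that "owned by the core" covers the dequeued, queued and done states described at the top of the file, the switch would plausibly read:

    switch (block->state) {
    case IIO_BLOCK_STATE_DEQUEUED:
    case IIO_BLOCK_STATE_QUEUED:
    case IIO_BLOCK_STATE_DONE:
        return true;   /* core-owned states may be re-used */
    default:
        return false;  /* active or dead blocks must not be re-used */
    }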
283 struct iio_dma_buffer_block *block;
291 * buffering scheme with usually one block at a time being used by the
308 block = queue->fileio.blocks[i];
311 if (block && (!iio_dma_block_reusable(block) || !try_reuse))
312 block->state = IIO_BLOCK_STATE_DEAD;
327 block = queue->fileio.blocks[i];
328 if (block->state == IIO_BLOCK_STATE_DEAD) {
330 iio_buffer_block_put(block);
331 block = NULL;
333 block->size = size;
336 block = NULL;
339 if (!block) {
340 block = iio_dma_buffer_alloc_block(queue, size);
341 if (!block) {
345 queue->fileio.blocks[i] = block;
348 block->state = IIO_BLOCK_STATE_QUEUED;
349 list_add_tail(&block->head, &queue->incoming);
360 struct iio_dma_buffer_block *block)
365 * If the hardware has already been removed, we put the block into
372 block->state = IIO_BLOCK_STATE_ACTIVE;
373 iio_buffer_block_get(block);
374 ret = queue->ops->submit(queue, block);
386 iio_buffer_block_put(block);
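
As a companion to the completion sketch above, this is roughly what the submit() callback of a dmaengine-based driver could look like (the my_drv_ names and the channel handling are assumptions; block->phys_addr and block->size come from this API):

/* Hypothetical submit() callback: queue one block on a dmaengine channel. */
static int my_drv_submit(struct iio_dma_buffer_queue *queue,
    struct iio_dma_buffer_block *block)
{
    struct my_drv_state *st = my_drv_state_from_queue(queue); /* hypothetical accessor */
    struct dma_async_tx_descriptor *desc;
    dma_cookie_t cookie;

    desc = dmaengine_prep_slave_single(st->chan, block->phys_addr,
        block->size, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
    if (!desc)
        return -ENOMEM;

    desc->callback = my_drv_dma_complete; /* ends up calling iio_dma_buffer_block_done() */
    desc->callback_param = block;

    cookie = dmaengine_submit(desc);
    if (dma_submit_error(cookie))
        return dma_submit_error(cookie);

    dma_async_issue_pending(st->chan);

    return 0;
}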
404 struct iio_dma_buffer_block *block, *_block;
408 list_for_each_entry_safe(block, _block, &queue->incoming, head) {
409 list_del(&block->head);
410 iio_dma_buffer_submit_block(queue, block);
443 struct iio_dma_buffer_block *block)
445 if (block->state == IIO_BLOCK_STATE_DEAD) {
446 iio_buffer_block_put(block);
448 iio_dma_buffer_submit_block(queue, block);
450 block->state = IIO_BLOCK_STATE_QUEUED;
451 list_add_tail(&block->head, &queue->incoming);
458 struct iio_dma_buffer_block *block;
461 block = list_first_entry_or_null(&queue->outgoing, struct
463 if (block != NULL) {
464 list_del(&block->head);
465 block->state = IIO_BLOCK_STATE_DEQUEUED;
469 return block;
485 struct iio_dma_buffer_block *block;
494 block = iio_dma_buffer_dequeue(queue);
495 if (block == NULL) {
500 queue->fileio.active_block = block;
502 block = queue->fileio.active_block;
506 if (n > block->bytes_used - queue->fileio.pos)
507 n = block->bytes_used - queue->fileio.pos;
509 if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
516 if (queue->fileio.pos == block->bytes_used) {
518 iio_dma_buffer_enqueue(queue, block);
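
From user space this read path is just the character device read(); a minimal consumer, assuming the buffer has already been configured and enabled through sysfs (device path and read size are examples):

/* Hypothetical user-space consumer of the buffered sample data. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
    char buf[4096]; /* ideally a multiple of the device's scan size */
    ssize_t n;
    int fd;

    fd = open("/dev/iio:device0", O_RDONLY); /* example device node */
    if (fd < 0)
        return 1;

    /* Each read() is served block by block by the code above. */
    while ((n = read(fd, buf, sizeof(buf))) > 0)
        fwrite(buf, 1, n, stdout);

    close(fd);
    return 0;
}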
540 struct iio_dma_buffer_block *block;
544 * For counting the available bytes we'll use the size of the block, not
545 * the number of actual bytes available in the block. Otherwise it is
555 list_for_each_entry(block, &queue->outgoing, head)
556 data_available += block->size;