Lines Matching refs:queue

33 * means of two queues: the incoming queue and the outgoing queue. Blocks on the
34 * incoming queue are waiting for the DMA controller to pick them up and fill
35 * them with data. Blocks on the outgoing queue have been filled with data and
51 * incoming or outgoing queue the block will be freed.
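
The hand-off between the two queues is driven by the driver through the submit() and abort() callbacks of struct iio_dma_buffer_ops (declared in include/linux/iio/buffer-dma.h). A minimal sketch of the driver side of that contract follows; struct my_dma_buffer, its active list and the my_hw_*() helpers are hypothetical placeholders, not part of the API.

#include <linux/iio/buffer-dma.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/*
 * Hypothetical driver state wrapping the core queue; "struct my_hw" and the
 * my_hw_*() helpers stand in for the real DMA controller driver.
 */
struct my_dma_buffer {
	struct iio_dma_buffer_queue queue;
	struct list_head active;	/* blocks currently owned by the hardware */
	struct my_hw *hw;
};

static struct my_dma_buffer *to_my_dma_buffer(struct iio_dma_buffer_queue *queue)
{
	return container_of(queue, struct my_dma_buffer, queue);
}

/* submit() callback: called by the core for each block taken off the incoming queue. */
static int my_dma_submit(struct iio_dma_buffer_queue *queue,
			 struct iio_dma_buffer_block *block)
{
	struct my_dma_buffer *buf = to_my_dma_buffer(queue);
	int ret;

	/* Track the block until the completion handler hands it back. */
	spin_lock_irq(&queue->list_lock);
	list_add_tail(&block->head, &buf->active);
	spin_unlock_irq(&queue->list_lock);

	/* Program the controller with the block's bus address and size. */
	ret = my_hw_start(buf->hw, block->phys_addr, block->size);
	if (ret) {
		spin_lock_irq(&queue->list_lock);
		list_del(&block->head);
		spin_unlock_irq(&queue->list_lock);
	}

	return ret;
}
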
100 dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
103 iio_buffer_put(&block->queue->buffer);
166 struct iio_dma_buffer_queue *queue, size_t size)
174 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
183 block->queue = queue;
187 iio_buffer_get(&queue->buffer);
194 struct iio_dma_buffer_queue *queue = block->queue;
202 list_add_tail(&block->head, &queue->outgoing);
211 * pass back ownership of the block to the queue.
215 struct iio_dma_buffer_queue *queue = block->queue;
218 spin_lock_irqsave(&queue->list_lock, flags);
220 spin_unlock_irqrestore(&queue->list_lock, flags);
223 wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
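
iio_dma_buffer_block_done() is the call a driver makes once the controller has finished filling a block; the core then moves the block to the outgoing queue and wakes up any readers. Continuing the hypothetical my_dma_buffer sketch above, a completion handler could look roughly like this:

#include <linux/iio/buffer-dma.h>

/*
 * Hypothetical completion path, e.g. run from the controller's interrupt
 * handler once a transfer into @block has finished.
 */
static void my_dma_transfer_complete(struct my_dma_buffer *buf,
				     struct iio_dma_buffer_block *block,
				     unsigned int bytes)
{
	unsigned long flags;

	/* The hardware no longer owns the block; drop it from the active list. */
	spin_lock_irqsave(&buf->queue.list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&buf->queue.list_lock, flags);

	/* Record how much data was actually produced ... */
	block->bytes_used = bytes;

	/*
	 * ... and pass ownership back to the core, which places the block on
	 * the outgoing queue and wakes up poll()/read().
	 */
	iio_dma_buffer_block_done(block);
}
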
230 * @queue: Queue for which to complete blocks.
231 * @list: List of aborted blocks. All blocks in this list must be from @queue.
235 * hand the blocks back to the queue.
237 void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
243 spin_lock_irqsave(&queue->list_lock, flags);
250 spin_unlock_irqrestore(&queue->list_lock, flags);
252 wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
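
iio_dma_buffer_block_list_abort() complements block_done() for the abort path: once the controller has been stopped, all blocks still in flight are handed back in one call. Continuing the sketch (my_hw_stop() is again a placeholder):

#include <linux/iio/buffer-dma.h>

/* abort() callback: streaming is stopping while blocks may still be in flight. */
static void my_dma_abort(struct iio_dma_buffer_queue *queue)
{
	struct my_dma_buffer *buf = to_my_dma_buffer(queue);

	/* Make sure the controller will no longer touch any block. */
	my_hw_stop(buf->hw);

	/*
	 * Hand every block that was still in flight back to the core in one
	 * go; the list must only contain blocks that belong to @queue.
	 */
	iio_dma_buffer_block_list_abort(queue, &buf->active);
}
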
282 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
294 size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
295 queue->buffer.length, 2);
297 mutex_lock(&queue->lock);
300 if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
303 queue->fileio.block_size = size;
304 queue->fileio.active_block = NULL;
306 spin_lock_irq(&queue->list_lock);
307 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
308 block = queue->fileio.blocks[i];
320 INIT_LIST_HEAD(&queue->outgoing);
321 spin_unlock_irq(&queue->list_lock);
323 INIT_LIST_HEAD(&queue->incoming);
325 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
326 if (queue->fileio.blocks[i]) {
327 block = queue->fileio.blocks[i];
340 block = iio_dma_buffer_alloc_block(queue, size);
345 queue->fileio.blocks[i] = block;
349 list_add_tail(&block->head, &queue->incoming);
353 mutex_unlock(&queue->lock);
359 static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
369 if (!queue->ops)
374 ret = queue->ops->submit(queue, block);
403 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
406 mutex_lock(&queue->lock);
407 queue->active = true;
408 list_for_each_entry_safe(block, _block, &queue->incoming, head) {
410 iio_dma_buffer_submit_block(queue, block);
412 mutex_unlock(&queue->lock);
429 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
431 mutex_lock(&queue->lock);
432 queue->active = false;
434 if (queue->ops && queue->ops->abort)
435 queue->ops->abort(queue);
436 mutex_unlock(&queue->lock);
442 static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
447 } else if (queue->active) {
448 iio_dma_buffer_submit_block(queue, block);
451 list_add_tail(&block->head, &queue->incoming);
456 struct iio_dma_buffer_queue *queue)
460 spin_lock_irq(&queue->list_lock);
461 block = list_first_entry_or_null(&queue->outgoing, struct
467 spin_unlock_irq(&queue->list_lock);
484 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
491 mutex_lock(&queue->lock);
493 if (!queue->fileio.active_block) {
494 block = iio_dma_buffer_dequeue(queue);
499 queue->fileio.pos = 0;
500 queue->fileio.active_block = block;
502 block = queue->fileio.active_block;
506 if (n > block->bytes_used - queue->fileio.pos)
507 n = block->bytes_used - queue->fileio.pos;
509 if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
514 queue->fileio.pos += n;
516 if (queue->fileio.pos == block->bytes_used) {
517 queue->fileio.active_block = NULL;
518 iio_dma_buffer_enqueue(queue, block);
524 mutex_unlock(&queue->lock);
539 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
550 mutex_lock(&queue->lock);
551 if (queue->fileio.active_block)
552 data_available += queue->fileio.active_block->size;
554 spin_lock_irq(&queue->list_lock);
555 list_for_each_entry(block, &queue->outgoing, head)
557 spin_unlock_irq(&queue->list_lock);
558 mutex_unlock(&queue->lock);
601 * iio_dma_buffer_init() - Initialize DMA buffer queue
602 * @queue: Buffer to initialize
604 * @ops: DMA buffer queue callback operations
606 * The DMA device will be used by the queue to do DMA memory allocations. So it
610 int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
613 iio_buffer_init(&queue->buffer);
614 queue->buffer.length = PAGE_SIZE;
615 queue->buffer.watermark = queue->buffer.length / 2;
616 queue->dev = dev;
617 queue->ops = ops;
619 INIT_LIST_HEAD(&queue->incoming);
620 INIT_LIST_HEAD(&queue->outgoing);
622 mutex_init(&queue->lock);
623 spin_lock_init(&queue->list_lock);
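
Putting the callbacks to use, iio_dma_buffer_init() records the DMA device used for the coherent allocations together with the ops table. A hypothetical constructor, continuing the sketch (wiring up the iio_buffer access functions is left out):

#include <linux/err.h>
#include <linux/iio/buffer-dma.h>
#include <linux/slab.h>

static const struct iio_dma_buffer_ops my_dma_buffer_ops = {
	.submit = my_dma_submit,
	.abort = my_dma_abort,
};

/*
 * Hypothetical constructor. @dma_dev must be the device that actually
 * performs the DMA, so that the coherent allocations end up in memory it
 * can reach.
 */
static struct my_dma_buffer *my_dma_buffer_alloc(struct device *dma_dev,
						 struct my_hw *hw)
{
	struct my_dma_buffer *buf;
	int ret;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->hw = hw;
	INIT_LIST_HEAD(&buf->active);

	/* Stores @dma_dev and the ops, sets the default length and watermark. */
	ret = iio_dma_buffer_init(&buf->queue, dma_dev, &my_dma_buffer_ops);
	if (ret) {
		kfree(buf);
		return ERR_PTR(ret);
	}

	return buf;
}
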
630 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
631 * @queue: Buffer to cleanup
636 void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
640 mutex_lock(&queue->lock);
642 spin_lock_irq(&queue->list_lock);
643 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
644 if (!queue->fileio.blocks[i])
646 queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
648 INIT_LIST_HEAD(&queue->outgoing);
649 spin_unlock_irq(&queue->list_lock);
651 INIT_LIST_HEAD(&queue->incoming);
653 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
654 if (!queue->fileio.blocks[i])
656 iio_buffer_block_put(queue->fileio.blocks[i]);
657 queue->fileio.blocks[i] = NULL;
659 queue->fileio.active_block = NULL;
660 queue->ops = NULL;
662 mutex_unlock(&queue->lock);
668 * @queue: Buffer to release
674 void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
676 mutex_destroy(&queue->lock);
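
iio_dma_buffer_exit() and iio_dma_buffer_release() split the teardown in two: exit() detaches the queue from the hardware (marking the remaining fileio blocks dead and clearing the ops pointer, as the fragments above show), while release() destroys the queue's lock and must only run once the last reference to the buffer is gone. A hedged continuation of the sketch, assuming the release function is wired up as the buffer's release callback:

#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer_impl.h>
#include <linux/slab.h>

/* Hypothetical teardown: detach from the hardware and drop our reference. */
static void my_dma_buffer_free(struct my_dma_buffer *buf)
{
	/*
	 * Marks the remaining blocks dead and clears the ops pointer;
	 * outstanding references to the buffer may still be dropped later.
	 */
	iio_dma_buffer_exit(&buf->queue);
	iio_buffer_put(&buf->queue.buffer);
}

/*
 * Meant to be wired up as the iio_buffer release callback so that it runs
 * once the last reference to the buffer has been dropped.
 */
static void my_dma_buffer_release(struct iio_buffer *buffer)
{
	struct my_dma_buffer *buf =
		container_of(buffer, struct my_dma_buffer, queue.buffer);

	iio_dma_buffer_release(&buf->queue);
	kfree(buf);
}
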