Lines matching refs: dmab

27 static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
30 static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
33 static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
35 const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
39 return ops->alloc(dmab, size);
49 * @dmab: buffer allocation record to store the allocated data
59 struct snd_dma_buffer *dmab)
63 if (WARN_ON(!dmab))
67 dmab->dev.type = type;
68 dmab->dev.dev = device;
69 dmab->dev.dir = dir;
70 dmab->bytes = 0;
71 dmab->addr = 0;
72 dmab->private_data = NULL;
73 dmab->area = __snd_dma_alloc_pages(dmab, size);
74 if (!dmab->area)
76 dmab->bytes = size;
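
The fragments at lines 49-76 are from snd_dma_alloc_dir_pages(), the exported entry point: it records the type, device and direction in the dmab header, dispatches to the type-specific allocator via __snd_dma_alloc_pages(), and fills dmab->area and dmab->bytes on success. A minimal caller-side sketch (the buffer type, size, direction and probe-style error handling are illustrative choices, not something the API mandates):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/string.h>
    #include <sound/memalloc.h>

    /* Illustrative helper: allocate a 64 KiB device-coherent playback
     * buffer and release it again. */
    static int example_alloc_dma(struct device *dev)
    {
        struct snd_dma_buffer buf;
        int err;

        err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, dev,
                                      DMA_TO_DEVICE, 64 * 1024, &buf);
        if (err < 0)
            return err;            /* typically -ENOMEM */

        /* buf.area: CPU mapping, buf.addr: DMA address, buf.bytes: size */
        memset(buf.area, 0, buf.bytes);

        snd_dma_free_pages(&buf);
        return 0;
    }

snd_dma_alloc_pages() is the same call with DMA_BIDIRECTIONAL implied, and snd_dma_alloc_pages_fallback() (lines 86-109 below) retries with progressively halved sizes until at least a page-sized allocation succeeds.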
86 * @dmab: buffer allocation record to store the allocated data
97 struct snd_dma_buffer *dmab)
101 while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
109 if (! dmab->area)
117 * @dmab: the buffer allocation record to release
121 void snd_dma_free_pages(struct snd_dma_buffer *dmab)
123 const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
126 ops->free(dmab);
156 struct snd_dma_buffer *dmab;
163 dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
164 if (!dmab)
167 err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
169 devres_free(dmab);
173 devres_add(dev, dmab);
174 return dmab;
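
Lines 156-174 are the devres-managed variant snd_devm_alloc_dir_pages(): the buffer record itself is devres-allocated and released through __snd_release_pages when the device is detached. A sketch of driver usage (the probe function and size are hypothetical):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <sound/memalloc.h>

    /* Illustrative probe fragment: the returned buffer is owned by devres,
     * so no explicit snd_dma_free_pages() is needed in error or remove paths. */
    static int example_probe_buffer(struct device *dev)
    {
        struct snd_dma_buffer *dmab;

        dmab = snd_devm_alloc_dir_pages(dev, SNDRV_DMA_TYPE_DEV,
                                        DMA_BIDIRECTIONAL, 128 * 1024);
        if (!dmab)
            return -ENOMEM;

        /* use dmab->area / dmab->addr exactly as with the non-devres API */
        return 0;
    }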
180 * @dmab: buffer allocation information
185 int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
190 if (!dmab)
192 ops = snd_dma_get_ops(dmab);
194 return ops->mmap(dmab, area);
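
Lines 180-194 are snd_dma_buffer_mmap(), which forwards to the type-specific ops->mmap. A hedged sketch of a driver-side mmap handler for an already allocated buffer (example_dev and the file-operations wiring are assumptions, not part of the listing):

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <sound/memalloc.h>

    struct example_dev {                   /* hypothetical driver state */
        struct snd_dma_buffer dmab;
    };

    static int example_mmap(struct file *file, struct vm_area_struct *vma)
    {
        struct example_dev *ed = file->private_data;

        /* delegates to the dev/wc/sg/... specific mmap implementation */
        return snd_dma_buffer_mmap(&ed->dmab, vma);
    }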
203 * @dmab: buffer allocation information
206 void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
211 if (!dmab || !dmab->dev.need_sync)
213 ops = snd_dma_get_ops(dmab);
215 ops->sync(dmab, mode);
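
Lines 203-215 are snd_dma_buffer_sync(); it returns immediately unless dmab->dev.need_sync was set at allocation time (non-coherent and some non-contiguous buffers). A usage sketch for the two sync directions:

    #include <sound/memalloc.h>

    /* Sketch: order CPU writes before device DMA, then make device writes
     * visible to the CPU afterwards.  Both calls are no-ops for buffers
     * that do not need explicit syncing. */
    static void example_sync(struct snd_dma_buffer *dmab)
    {
        snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);   /* CPU -> device */

        /* ... device DMA runs here ... */

        snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);      /* device -> CPU */
    }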
222 * @dmab: buffer allocation information
227 dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
229 const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
232 return ops->get_addr(dmab, offset);
234 return dmab->addr + offset;
240 * @dmab: buffer allocation information
245 struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
247 const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
250 return ops->get_page(dmab, offset);
252 return virt_to_page(dmab->area + offset);
259 * @dmab: buffer allocation information
265 unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
268 const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
271 return ops->get_chunk_size(dmab, ofs, size);
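
Lines 222-271 are the snd_sgbuf_* helpers that let callers treat linear and scatter-gather buffers uniformly: per-offset DMA address, backing page, and the length of the physically contiguous chunk at a given offset. A sketch that walks a buffer chunk by chunk, e.g. to fill hardware descriptors (program_descriptor() is a placeholder, not a kernel API):

    #include <linux/types.h>
    #include <sound/memalloc.h>

    static void program_descriptor(dma_addr_t addr, unsigned int len);  /* placeholder */

    static void example_walk_chunks(struct snd_dma_buffer *dmab, unsigned int bytes)
    {
        unsigned int ofs = 0;

        while (ofs < bytes) {
            dma_addr_t addr = snd_sgbuf_get_addr(dmab, ofs);
            unsigned int len = snd_sgbuf_get_chunk_size(dmab, ofs,
                                                        bytes - ofs);

            program_descriptor(addr, len);
            ofs += len;
        }
    }

For buffer types without a get_addr/get_page callback the helpers simply fall back to dmab->addr + offset and virt_to_page(dmab->area + offset), as lines 234 and 252 show.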
320 static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
322 return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false);
325 static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
327 do_free_pages(dmab->area, dmab->bytes, false);
330 static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
334 dmab->addr >> PAGE_SHIFT,
348 static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
353 static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
355 vfree(dmab->area);
358 static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
361 return remap_vmalloc_range(area, dmab->area, 0);
364 #define get_vmalloc_page_addr(dmab, offset) \
365 page_to_phys(vmalloc_to_page((dmab)->area + (offset)))
367 static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
370 return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
373 static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
376 return vmalloc_to_page(dmab->area + offset);
380 snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
389 addr = get_vmalloc_page_addr(dmab, start);
395 if (get_vmalloc_page_addr(dmab, start) != addr)
416 static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
418 struct device *dev = dmab->dev.dev;
425 dmab->private_data = pool;
427 p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
435 dmab->dev.type = SNDRV_DMA_TYPE_DEV;
436 return __snd_dma_alloc_pages(dmab, size);
439 static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
441 struct gen_pool *pool = dmab->private_data;
443 if (pool && dmab->area)
444 gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
447 static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
452 dmab->addr >> PAGE_SHIFT,
467 static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
469 return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
472 static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
474 dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
477 static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
480 return dma_mmap_coherent(dmab->dev.dev, area,
481 dmab->area, dmab->addr, dmab->bytes);
495 static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
497 return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);
500 static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
502 do_free_pages(dmab->area, dmab->bytes, true);
505 static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
509 return snd_dma_continuous_mmap(dmab, area);
512 static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
514 return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
517 static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
519 dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
522 static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
525 return dma_mmap_wc(dmab->dev.dev, area,
526 dmab->area, dmab->addr, dmab->bytes);
539 static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
546 return snd_dma_sg_fallback_alloc(dmab, size);
548 sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
551 if (!sgt && !get_dma_ops(dmab->dev.dev))
552 return snd_dma_sg_fallback_alloc(dmab, size);
557 dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
559 p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
561 dmab->private_data = sgt;
563 dmab->addr = snd_sgbuf_get_addr(dmab, 0);
565 dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
570 static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
572 dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
573 dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
574 dmab->dev.dir);
577 static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
580 return dma_mmap_noncontiguous(dmab->dev.dev, area,
581 dmab->bytes, dmab->private_data);
584 static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
588 if (dmab->dev.dir == DMA_TO_DEVICE)
590 invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
591 dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
592 dmab->dev.dir);
594 if (dmab->dev.dir == DMA_FROM_DEVICE)
596 flush_kernel_vmap_range(dmab->area, dmab->bytes);
597 dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
598 dmab->dev.dir);
602 static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
606 struct sg_table *sgt = dmab->private_data;
612 static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
617 snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
622 static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
627 snd_dma_noncontig_iter_set(dmab, &iter, offset);
633 snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
642 snd_dma_noncontig_iter_set(dmab, &iter.base, start);
674 static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
676 void *p = snd_dma_noncontig_alloc(dmab, size);
677 struct sg_table *sgt = dmab->private_data;
682 if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
689 static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
691 struct sg_table *sgt = dmab->private_data;
696 snd_dma_noncontig_free(dmab);
699 static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
703 return dma_mmap_noncontiguous(dmab->dev.dev, area,
704 dmab->bytes, dmab->private_data);
726 static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
740 dma_free_coherent(dmab->dev.dev, size << PAGE_SHIFT,
754 static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
764 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG)
765 dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
766 else if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
767 dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
786 p = dma_alloc_coherent(dmab->dev.dev, chunk, &addr, DEFAULT_GFP);
788 p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
813 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
816 dmab->private_data = sgbuf;
818 dmab->addr = sgbuf->addrs[0] & PAGE_MASK;
822 __snd_dma_sg_fallback_free(dmab, sgbuf);
826 static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
828 struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
830 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
832 vunmap(dmab->area);
833 __snd_dma_sg_fallback_free(dmab, dmab->private_data);
836 static dma_addr_t snd_dma_sg_fallback_get_addr(struct snd_dma_buffer *dmab,
839 struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
845 static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
848 struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
850 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
869 static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
873 p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
874 dmab->dev.dir, DEFAULT_GFP);
876 dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
880 static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
882 dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
883 dmab->addr, dmab->dev.dir);
886 static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
890 return dma_mmap_pages(dmab->dev.dev, area,
892 virt_to_page(dmab->area));
895 static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
899 if (dmab->dev.dir != DMA_TO_DEVICE)
900 dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
901 dmab->bytes, dmab->dev.dir);
903 if (dmab->dev.dir != DMA_FROM_DEVICE)
904 dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
905 dmab->bytes, dmab->dev.dir);
942 static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
944 if (WARN_ON_ONCE(!dmab))
946 if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
947 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
949 return snd_dma_ops[dmab->dev.type];
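
The closing fragments (lines 942-949) are snd_dma_get_ops(), the dispatcher used by every helper above: it validates dmab and its type, then indexes the file-local snd_dma_ops table. Since the listing only shows lines that mention dmab, the early-return statements are missing; a hedged reconstruction of the complete function would look like:

    static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
    {
        if (WARN_ON_ONCE(!dmab))
            return NULL;
        if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
                         dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
            return NULL;
        return snd_dma_ops[dmab->dev.type];
    }

In the full source the callers additionally check the returned ops pointer and the individual callback for NULL before invoking it; those checks do not mention dmab and therefore do not appear in this listing.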