Lines matching references to 'size'

43 size_t size;
53 size_t size;
107 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
115 phys_addr_t end = base + size;
116 while (size > 0) {
123 size -= PAGE_SIZE;
129 memset(ptr, 0, size);
131 dmac_flush_range(ptr, ptr + size);
132 outer_flush_range(__pa(ptr), __pa(ptr) + size);
138 * Allocate a DMA buffer for 'dev' of size 'size' using the
139 * specified gfp mask. Note that 'size' must be page aligned.
141 static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
144 unsigned long order = get_order(size);
155 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
158 __dma_clear_buffer(page, size, coherent_flag);
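
The fragments at lines 141-158 round the request up to a power-of-two page order with get_order(), then trim the allocation back down to size >> PAGE_SHIFT pages. A minimal userspace sketch of that size arithmetic, assuming 4 KiB pages and using get_order_sketch() as a hypothetical stand-in for the kernel's get_order():

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12                      /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Hypothetical stand-in for the kernel's get_order(): smallest order such
 * that (1 << order) pages hold 'size' bytes. */
static unsigned int get_order_sketch(size_t size)
{
	size_t pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	size_t size = 5 * PAGE_SIZE;           /* page aligned, as line 139 requires */
	unsigned int order = get_order_sketch(size);
	size_t used = size >> PAGE_SHIFT;      /* pages actually needed          */
	size_t total = 1UL << order;           /* pages the buddy allocator hands out */

	/* The loop at line 155 frees exactly this surplus back to the page
	 * allocator. */
	printf("order %u: %zu of %zu pages used, %zu freed as tail\n",
	       order, used, total, total - used);
	return 0;
}
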
164 * Free a DMA buffer. 'size' must be page aligned.
166 static void __dma_free_buffer(struct page *page, size_t size)
168 struct page *e = page + (size >> PAGE_SHIFT);
176 static void *__alloc_from_contiguous(struct device *dev, size_t size,
181 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
254 unsigned long size;
262 void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
265 dma_mmu_remap[dma_mmu_remap_num].size = size;
275 phys_addr_t end = start + dma_mmu_remap[i].size;
319 static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
322 unsigned end = start + size;
324 apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
328 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
338 page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
344 ptr = dma_common_contiguous_remap(page, size, prot, caller);
346 __dma_free_buffer(page, size);
355 static void *__alloc_from_pool(size_t size, struct page **ret_page)
365 val = gen_pool_alloc(atomic_pool, size);
376 static bool __in_atomic_pool(void *start, size_t size)
378 return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
381 static int __free_from_pool(void *start, size_t size)
383 if (!__in_atomic_pool(start, size))
386 gen_pool_free(atomic_pool, (unsigned long)start, size);
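
Lines 376-386 release a buffer into the atomic pool only after gen_pool_has_addr() confirms the whole [start, start + size) range belongs to it. A minimal sketch of that containment test, assuming a single contiguous pool; in_pool() is a hypothetical stand-in for the gen_pool call:

#include <stdbool.h>
#include <stdio.h>
#include <stddef.h>

/* Hypothetical model of the check behind gen_pool_has_addr() on line 378:
 * the free is honoured only if [start, start + size) lies entirely inside
 * [pool_base, pool_base + pool_size). Written to avoid overflow. */
static bool in_pool(unsigned long pool_base, size_t pool_size,
		    unsigned long start, size_t size)
{
	return start >= pool_base &&
	       size <= pool_size &&
	       start - pool_base <= pool_size - size;
}

int main(void)
{
	unsigned long base = 0x40000000UL;     /* example pool placement */
	size_t pool_size = 256 * 1024;         /* example pool size      */

	printf("%d %d\n",
	       in_pool(base, pool_size, base + 4096, 4096),           /* 1: inside     */
	       in_pool(base, pool_size, base + pool_size - 16, 64));  /* 0: spills out */
	return 0;
}
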
391 static void *__alloc_from_contiguous(struct device *dev, size_t size,
396 unsigned long order = get_order(size);
397 size_t count = size >> PAGE_SHIFT;
405 __dma_clear_buffer(page, size, coherent_flag);
411 ptr = dma_common_contiguous_remap(page, size, prot, caller);
417 __dma_remap(page, size, prot);
427 void *cpu_addr, size_t size, bool want_vaddr)
431 dma_common_free_remap(cpu_addr, size);
433 __dma_remap(page, size, PAGE_KERNEL);
435 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
446 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
451 page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
462 return __alloc_simple_buffer(args->dev, args->size, args->gfp,
468 __dma_free_buffer(args->page, args->size);
479 return __alloc_from_contiguous(args->dev, args->size, args->prot,
488 args->size, args->want_vaddr);
499 return __alloc_from_pool(args->size, ret_page);
504 __free_from_pool(args->cpu_addr, args->size);
515 return __alloc_remap_buffer(args->dev, args->size, args->gfp,
523 dma_common_free_remap(args->cpu_addr, args->size);
525 __dma_free_buffer(args->page, args->size);
533 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
544 .size = PAGE_ALIGN(size),
554 if (limit && size >= limit) {
556 size, mask);
605 static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
613 .size = PAGE_ALIGN(size),
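
Lines 544 and 613 run every request through PAGE_ALIGN() before an allocator or free path is chosen, so the helpers listed above only ever see whole pages. A standalone illustration of that macro, assuming the usual 4 KiB page definitions:

#include <stdio.h>

#define PAGE_SHIFT 12                              /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long sizes[] = { 1, 4096, 4097, 65536 };

	for (unsigned int i = 0; i < 4; i++)
		printf("PAGE_ALIGN(%lu) = %lu (%lu pages)\n",
		       sizes[i], PAGE_ALIGN(sizes[i]),
		       PAGE_ALIGN(sizes[i]) >> PAGE_SHIFT);
	return 0;
}
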
628 size_t size, enum dma_data_direction dir,
632 size_t left = size;
680 size_t size, enum dma_data_direction dir)
684 dma_cache_maint_page(page, off, size, dir, dmac_map_area);
688 outer_inv_range(paddr, paddr + size);
690 outer_clean_range(paddr, paddr + size);
696 size_t size, enum dma_data_direction dir)
703 outer_inv_range(paddr, paddr + size);
705 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
711 if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
718 if (size < sz)
723 size -= sz;
724 if (!size)
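
Lines 711-724 walk the synced range one folio at a time and mark only units that are covered from offset 0 for their full length. A simplified sketch of that walk, assuming single-page folios and 4 KiB pages; mark_clean() is a hypothetical stand-in for setting PG_dcache_clean:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12                    /* assumed 4 KiB pages */
#define PAGE_SIZE  ((size_t)1 << PAGE_SHIFT)

/* Hypothetical stand-in for set_bit(PG_dcache_clean, ...). */
static void mark_clean(size_t pfn)
{
	printf("page %zu marked clean\n", pfn);
}

/* Simplified model of the loop on lines 711-724, with every folio taken to
 * be a single page: only pages covered from their start (offset == 0) for
 * their whole length get the clean mark. */
static void mark_fully_covered(unsigned long long paddr, size_t size)
{
	size_t pfn = paddr / PAGE_SIZE;
	size_t offset = paddr & (PAGE_SIZE - 1);

	for (;;) {
		size_t sz = PAGE_SIZE - offset;

		if (size < sz)          /* tail only partially covered */
			break;
		if (!offset)
			mark_clean(pfn);
		offset = 0;
		size -= sz;
		if (!size)
			break;
		pfn++;
	}
}

int main(void)
{
	/* Starts mid-page, so only the two fully covered middle pages qualify. */
	mark_fully_covered(0x80000800ULL, 3 * PAGE_SIZE);
	return 0;
}
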
757 size_t size)
759 unsigned int order = get_order(size);
770 count = PAGE_ALIGN(size) >> PAGE_SHIFT;
788 * address range of size bytes.
815 dma_addr_t addr, size_t size)
823 if (!size)
833 if (addr + size > bitmap_base + mapping_size) {
842 count = size >> PAGE_SHIFT;
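
Lines 757-842 manage IOVA space with a bitmap: __alloc_iova() turns the byte size into a page count with PAGE_ALIGN(size) >> PAGE_SHIFT (line 770) and __free_iova() clears the same run again (line 842). A toy userspace model of that bookkeeping, assuming 4 KiB pages, a single 64-page window, and hypothetical alloc_iova()/free_iova() helpers; the real code's bitmap extensions and the bounds check on line 833 are omitted:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

#define PAGE_SHIFT 12                      /* assumed 4 KiB pages           */
#define PAGE_SIZE  ((size_t)1 << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

#define IOVA_BASE  0x10000000UL            /* hypothetical mapping base     */
#define IOVA_PAGES 64                      /* hypothetical window, 64 pages */

static unsigned char used[IOVA_PAGES];     /* one flag per IOVA page        */

/* Toy model of __alloc_iova(): convert the byte size into a page count and
 * grab the first free run of that many pages. Returns 0 on failure. */
static unsigned long alloc_iova(size_t size)
{
	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	for (size_t start = 0; start + count <= IOVA_PAGES; start++) {
		size_t i;

		for (i = 0; i < count && !used[start + i]; i++)
			;
		if (i == count) {
			memset(&used[start], 1, count);
			return IOVA_BASE + (start << PAGE_SHIFT);
		}
	}
	return 0;
}

/* Toy model of __free_iova(): translate the address back into a page offset
 * and clear the flags again. Line 842 uses size >> PAGE_SHIFT and trusts its
 * callers to pass page-aligned sizes; the toy re-aligns for symmetry. */
static void free_iova(unsigned long addr, size_t size)
{
	size_t start = (addr - IOVA_BASE) >> PAGE_SHIFT;
	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	memset(&used[start], 0, count);
}

int main(void)
{
	unsigned long a = alloc_iova(3 * PAGE_SIZE + 100); /* rounds up to 4 pages */
	unsigned long b = alloc_iova(PAGE_SIZE);

	printf("a=%#lx b=%#lx\n", a, b);   /* b lands 4 pages above a */
	free_iova(a, 3 * PAGE_SIZE + 100);
	free_iova(b, PAGE_SIZE);
	return 0;
}
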
852 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
857 int count = size >> PAGE_SHIFT;
871 unsigned long order = get_order(size);
879 __dma_clear_buffer(page, size, coherent_flag);
944 size_t size, unsigned long attrs)
946 int count = size >> PAGE_SHIFT;
965 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
969 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
973 dma_addr = __alloc_iova(mapping, size);
1001 __free_iova(mapping, dma_addr, size);
1005 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
1010 * add optional in-page offset from iova to size and align
1011 * result to page size
1013 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
1016 iommu_unmap(mapping->domain, iova, size);
1017 __free_iova(mapping, iova, size);
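
Line 1013 folds the sub-page offset of the IOVA into the size before aligning, so the following iommu_unmap() and __free_iova() cover every page the mapping touched. A tiny illustration of why that adjustment matters, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12                              /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long iova = 0x10000f80UL;         /* 128 bytes below a page boundary */
	unsigned long size = 0x100;                /* 256-byte mapping                */

	/* Aligning the size alone would unmap one page and miss the second
	 * page the mapping spills into; line 1013's adjustment covers both. */
	printf("PAGE_ALIGN(size) = %#lx\n", PAGE_ALIGN(size));
	printf("PAGE_ALIGN((iova & ~PAGE_MASK) + size) = %#lx\n",
	       PAGE_ALIGN((iova & ~PAGE_MASK) + size));
	return 0;
}
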
1043 static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
1051 addr = __alloc_simple_buffer(dev, size, gfp, &page);
1053 addr = __alloc_from_pool(size, &page);
1057 *handle = __iommu_create_mapping(dev, &page, size, attrs);
1064 __free_from_pool(addr, size);
1069 dma_addr_t handle, size_t size, int coherent_flag)
1071 __iommu_remove_mapping(dev, handle, size);
1073 __dma_free_buffer(virt_to_page(cpu_addr), size);
1075 __free_from_pool(cpu_addr, size);
1078 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
1087 size = PAGE_ALIGN(size);
1090 return __iommu_alloc_simple(dev, size, gfp, handle,
1093 pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
1097 *handle = __iommu_create_mapping(dev, pages, size, attrs);
1104 addr = dma_common_pages_remap(pages, size, prot,
1112 __iommu_remove_mapping(dev, *handle, size);
1114 __iommu_free_buffer(dev, pages, size, attrs);
1119 void *cpu_addr, dma_addr_t dma_addr, size_t size,
1123 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1146 static void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
1151 size = PAGE_ALIGN(size);
1153 if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
1154 __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
1165 dma_common_free_remap(cpu_addr, size);
1167 __iommu_remove_mapping(dev, handle, size);
1168 __iommu_free_buffer(dev, pages, size, attrs);
1173 size_t size, unsigned long attrs)
1175 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1181 return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
1189 size_t size, dma_addr_t *handle,
1199 size = PAGE_ALIGN(size);
1202 iova_base = iova = __alloc_iova(mapping, size);
1206 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
1227 __free_iova(mapping, iova_base, size);
1249 unsigned int size = s->offset + s->length;
1257 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
1258 ret = __map_sg_chunk(dev, start, size,
1264 dma->dma_length = size - offset;
1266 size = offset = s->offset;
1271 size += s->length;
1273 ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs);
1278 dma->dma_length = size - offset;
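
In the scatterlist path at lines 1249-1278, consecutive segments are merged into one IOVA chunk only while the accumulated size stays page aligned and the next segment starts at page offset 0; otherwise line 1257 flushes the chunk through __map_sg_chunk() and starts a new one. A minimal sketch of that decision, assuming 4 KiB pages and leaving out the per-mapping max-size limit:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12                      /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* A segment can extend the current chunk only if the chunk built so far
 * ends exactly on a page boundary and the segment starts at page offset 0
 * (mirrors the test on line 1257, without the 'size + s->length > max'
 * limit). */
static bool can_extend_chunk(unsigned long chunk_size, unsigned int seg_offset)
{
	return seg_offset == 0 && (chunk_size & ~PAGE_MASK) == 0;
}

int main(void)
{
	printf("%d\n", can_extend_chunk(2 * PAGE_SIZE, 0));    /* 1: merge     */
	printf("%d\n", can_extend_chunk(PAGE_SIZE + 512, 0));  /* 0: new chunk */
	printf("%d\n", can_extend_chunk(PAGE_SIZE, 256));      /* 0: new chunk */
	return 0;
}
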
1366 * @size: size of buffer to map
1372 unsigned long offset, size_t size, enum dma_data_direction dir,
1377 int ret, prot, len = PAGE_ALIGN(size + offset);
1380 __dma_page_cpu_to_dev(page, offset, size, dir);
1403 * @size: size of buffer (same as passed to dma_map_page)
1409 size_t size, enum dma_data_direction dir, unsigned long attrs)
1415 int len = PAGE_ALIGN(size + offset);
1422 __dma_page_dev_to_cpu(page, offset, size, dir);
1433 * @size: size of resource to map
1437 phys_addr_t phys_addr, size_t size,
1445 size_t len = PAGE_ALIGN(size + offset);
1467 * @size: size of resource to map
1471 size_t size, enum dma_data_direction dir,
1477 size_t len = PAGE_ALIGN(size + offset);
1487 dma_addr_t handle, size_t size, enum dma_data_direction dir)
1498 __dma_page_dev_to_cpu(page, offset, size, dir);
1502 dma_addr_t handle, size_t size, enum dma_data_direction dir)
1513 __dma_page_cpu_to_dev(page, offset, size, dir);
1540 * @size: maximum size of the valid IO address space
1550 arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size)
1552 unsigned int bits = size >> PAGE_SHIFT;
1559 if (size > DMA_BIT_MASK(32) + 1)
1715 static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
1720 mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
1723 size, dev_name(dev));
1750 static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
1759 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
1780 arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent);
1796 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
1800 size, dir);
1803 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
1807 size, dir);
1810 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
1813 return __dma_alloc(dev, size, dma_handle, gfp,
1818 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
1821 __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);