Lines Matching defs:page
28 #include <asm/page.h>
55 struct page *page;
64 struct page **ret_page);
107 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
113 if (PageHighMem(page)) {
114 phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
117 void *ptr = kmap_atomic(page);
122 page++;
128 void *ptr = page_address(page);
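
Note on the fragments at 107-128: __dma_clear_buffer() takes two paths. Highmem pages have no permanent kernel mapping, so they are mapped, zeroed and unmapped one page at a time; lowmem buffers are cleared in one go through the linear map. A minimal sketch of that pattern, leaving out the cache flushing the real function also performs (the helper name is made up):

    #include <linux/highmem.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    /* Hypothetical helper: zero a physically contiguous run of pages. */
    static void clear_buffer_sketch(struct page *page, size_t size)
    {
        if (PageHighMem(page)) {
            /* No linear mapping: map, clear, unmap one page at a time. */
            while (size > 0) {
                void *ptr = kmap_atomic(page);

                memset(ptr, 0, PAGE_SIZE);
                kunmap_atomic(ptr);
                page++;
                size -= PAGE_SIZE;
            }
        } else {
            /* Lowmem: the whole buffer is reachable via page_address(). */
            memset(page_address(page), 0, size);
        }
    }
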
139 * specified gfp mask. Note that 'size' must be page aligned.
141 static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
145 struct page *page, *p, *e;
147 page = alloc_pages(gfp, order);
148 if (!page)
152 * Now split the huge page and free the excess pages
154 split_page(page, order);
155 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
158 __dma_clear_buffer(page, size, coherent_flag);
160 return page;
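
Lines 139-160 show the usual trick for allocating an arbitrary page-aligned size from the page allocator: round the request up to a power-of-two order, split_page() the result into independent order-0 pages, and immediately free the tail beyond 'size'. A hedged sketch (the helper name is invented; gfp must not include __GFP_COMP, since split_page() refuses compound pages):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Hypothetical helper: return exactly size >> PAGE_SHIFT order-0 pages. */
    static struct page *alloc_buffer_sketch(size_t size, gfp_t gfp)
    {
        unsigned int order = get_order(size);
        struct page *page, *p, *e;

        page = alloc_pages(gfp, order);
        if (!page)
            return NULL;

        /* Break the higher-order block into order-0 pages, then hand back
         * every page past the requested (page-aligned) size. */
        split_page(page, order);
        for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
            __free_page(p);

        return page;
    }

Freeing such a buffer later is the mirror image seen at 164-172: walk the size >> PAGE_SHIFT pages and __free_page() each one.
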
164 * Free a DMA buffer. 'size' must be page aligned.
166 static void __dma_free_buffer(struct page *page, size_t size)
168 struct page *e = page + (size >> PAGE_SHIFT);
170 while (page < e) {
171 __free_page(page);
172 page++;
177 pgprot_t prot, struct page **ret_page,
182 pgprot_t prot, struct page **ret_page,
204 struct page *page;
216 &page, atomic_pool_init, true, NORMAL,
220 &page, atomic_pool_init, true);
225 page_to_phys(page),
312 struct page *page = virt_to_page((void *)addr);
315 set_pte_ext(pte, mk_pte(page, prot), 0);
319 static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
321 unsigned long start = (unsigned long) page_address(page);
329 pgprot_t prot, struct page **ret_page,
332 struct page *page;
338 page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
339 if (!page)
344 ptr = dma_common_contiguous_remap(page, size, prot, caller);
346 __dma_free_buffer(page, size);
351 *ret_page = page;
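
Lines 329-351 (__alloc_remap_buffer) combine the two steps above: allocate the pages, then build a second kernel mapping with the requested pgprot (typically non-cacheable) via dma_common_contiguous_remap(), and undo the allocation if the remap fails. A condensed, self-contained sketch, assuming the four-argument dma_common_contiguous_remap() declared in <linux/dma-map-ops.h> on recent kernels; the function name is illustrative:

    #include <linux/dma-map-ops.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static void *remap_alloc_sketch(size_t size, gfp_t gfp, pgprot_t prot,
                                    struct page **ret_page, const void *caller)
    {
        unsigned int order = get_order(size);
        struct page *page, *p, *e;
        void *ptr;

        page = alloc_pages(gfp, order);
        if (!page)
            return NULL;
        split_page(page, order);
        for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
            __free_page(p);

        /* Second mapping of the same pages with DMA-suitable attributes;
         * 'caller' identifies the allocation site for diagnostics. */
        ptr = dma_common_contiguous_remap(page, size, prot, caller);
        if (!ptr) {
            for (p = page, e = page + (size >> PAGE_SHIFT); p < e; p++)
                __free_page(p);
            return NULL;
        }

        *ret_page = page;
        return ptr;
    }
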
355 static void *__alloc_from_pool(size_t size, struct page **ret_page)
392 pgprot_t prot, struct page **ret_page,
398 struct page *page;
401 page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
402 if (!page)
405 __dma_clear_buffer(page, size, coherent_flag);
410 if (PageHighMem(page)) {
411 ptr = dma_common_contiguous_remap(page, size, prot, caller);
413 dma_release_from_contiguous(dev, page, count);
417 __dma_remap(page, size, prot);
418 ptr = page_address(page);
422 *ret_page = page;
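
Lines 392-422 (__alloc_from_contiguous) are the CMA path: reserve physically contiguous pages with dma_alloc_from_contiguous(), clear them, and then either build a fresh mapping for highmem pages or rewrite the existing linear-map attributes (__dma_remap) for lowmem. A hedged sketch of the reservation and release half only; the helper names and the omission of the remapping step are mine:

    #include <linux/dma-map-ops.h>   /* dma_alloc_from_contiguous() on recent kernels */
    #include <linux/mm.h>

    /* Hypothetical helpers: grab 'size' bytes of physically contiguous memory
     * from the device's CMA area and hand it back again. */
    static struct page *cma_alloc_sketch(struct device *dev, size_t size, gfp_t gfp)
    {
        unsigned long count = size >> PAGE_SHIFT;
        unsigned int align = get_order(size);

        /* The last argument only controls allocation-failure warnings. */
        return dma_alloc_from_contiguous(dev, count, align, gfp & __GFP_NOWARN);
    }

    static void cma_free_sketch(struct device *dev, struct page *page, size_t size)
    {
        dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
    }
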
426 static void __free_from_contiguous(struct device *dev, struct page *page,
430 if (PageHighMem(page))
433 __dma_remap(page, size, PAGE_KERNEL);
435 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
447 struct page **ret_page)
449 struct page *page;
451 page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
452 if (!page)
455 *ret_page = page;
456 return page_address(page);
460 struct page **ret_page)
468 __dma_free_buffer(args->page, args->size);
477 struct page **ret_page)
487 __free_from_contiguous(args->dev, args->page, args->cpu_addr,
497 struct page **ret_page)
513 struct page **ret_page)
525 __dma_free_buffer(args->page, args->size);
538 struct page *page = NULL;
584 addr = buf->allocator->alloc(&args, &page);
586 if (page) {
589 *handle = phys_to_dma(dev, page_to_phys(page));
590 buf->virt = args.want_vaddr ? addr : page;
599 return args.want_vaddr ? addr : page;
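
Lines 538-599 are the top-level __dma_alloc() dispatch: whichever allocator produced the struct page, the bus address handed back to the caller is derived from its physical address, and the CPU-side return value depends on whether a kernel mapping was requested at all. A hypothetical illustration of that final step (the function name is invented):

    #include <linux/dma-direct.h>    /* phys_to_dma() */
    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    static void *publish_buffer_sketch(struct device *dev, struct page *page,
                                       void *kernel_vaddr, unsigned long attrs,
                                       dma_addr_t *handle)
    {
        bool want_vaddr = !(attrs & DMA_ATTR_NO_KERNEL_MAPPING);

        /* The device-visible handle comes from the page's physical address. */
        *handle = phys_to_dma(dev, page_to_phys(page));

        /* With DMA_ATTR_NO_KERNEL_MAPPING the caller only needs an opaque
         * cookie, so the struct page itself is returned instead of a mapping. */
        return want_vaddr ? kernel_vaddr : page;
    }
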
609 struct page *page = phys_to_page(dma_to_phys(dev, handle));
615 .page = page,
627 static void dma_cache_maint_page(struct page *page, unsigned long offset,
634 pfn = page_to_pfn(page) + offset / PAGE_SIZE;
647 page = pfn_to_page(pfn);
649 if (PageHighMem(page)) {
654 vaddr = kmap_atomic(page);
658 vaddr = kmap_high_get(page);
661 kunmap_high(page);
665 vaddr = page_address(page) + offset;
679 static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
684 dma_cache_maint_page(page, off, size, dir, dmac_map_area);
686 paddr = page_to_phys(page) + off;
695 static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
698 phys_addr_t paddr = page_to_phys(page) + off;
705 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
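
Lines 627-705 are the streaming-DMA cache maintenance: dma_cache_maint_page() walks the buffer page by page (a single scatterlist entry may span several physically contiguous pages), taking a temporary kmap for highmem pages, and __dma_page_cpu_to_dev()/__dma_page_dev_to_cpu() wrap it with dmac_map_area/dmac_unmap_area plus the outer-cache operations. A simplified sketch of the walk, assuming only generic helpers and ignoring the kmap_high_get() and outer-cache details of the real code:

    #include <linux/dma-mapping.h>
    #include <linux/highmem.h>
    #include <linux/mm.h>

    /* Hypothetical helper: apply a cache-maintenance callback to every page
     * touched by [offset, offset + size) within a physically contiguous area. */
    static void cache_maint_sketch(struct page *page, unsigned long offset,
                                   size_t size, enum dma_data_direction dir,
                                   void (*op)(const void *, size_t, int))
    {
        unsigned long pfn = page_to_pfn(page) + offset / PAGE_SIZE;
        size_t left = size;

        offset %= PAGE_SIZE;

        do {
            size_t len = left;
            void *vaddr;

            page = pfn_to_page(pfn);
            if (PageHighMem(page)) {
                /* Highmem pages must be handled one at a time through a
                 * temporary mapping. */
                if (len + offset > PAGE_SIZE)
                    len = PAGE_SIZE - offset;
                vaddr = kmap_atomic(page);
                op(vaddr + offset, len, dir);
                kunmap_atomic(vaddr);
            } else {
                /* Lowmem is contiguous in the linear map, so the rest of the
                 * buffer can be handled in one call. */
                op(page_address(page) + offset, len, dir);
            }
            offset = 0;
            pfn++;
            left -= len;
        } while (left);
    }
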
852 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
856 struct page **pages;
858 int array_size = count * sizeof(struct page *);
872 struct page *page;
874 page = dma_alloc_from_contiguous(dev, count, order,
876 if (!page)
879 __dma_clear_buffer(page, size, coherent_flag);
882 pages[i] = page + i;
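
Lines 852-882: the IOMMU allocator does not need the buffer to be physically contiguous, so it tracks it as an array of struct page pointers; when the allocation does come from CMA (as in the fragment above), the array is simply filled with page + i. A sketch of that bookkeeping, using kvzalloc() for the potentially large pointer array; the helper is illustrative and skips the real function's non-contiguous fallback:

    #include <linux/dma-map-ops.h>   /* dma_alloc_from_contiguous() on recent kernels */
    #include <linux/mm.h>
    #include <linux/slab.h>

    static struct page **iommu_pages_sketch(struct device *dev, size_t size, gfp_t gfp)
    {
        int count = size >> PAGE_SHIFT;
        struct page **pages;
        struct page *page;
        int i;

        /* The array itself can be large, so allow a vmalloc fallback. */
        pages = kvzalloc(count * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
            return NULL;

        page = dma_alloc_from_contiguous(dev, count, get_order(size),
                                         gfp & __GFP_NOWARN);
        if (!page) {
            kvfree(pages);
            return NULL;
        }

        /* One physically contiguous block still gets an entry per page. */
        for (i = 0; i < count; i++)
            pages[i] = page + i;

        return pages;
    }
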
943 static int __iommu_free_buffer(struct device *dev, struct page **pages,
965 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
1010 * add optional in-page offset from iova to size and align
1011 * result to page size
1021 static struct page **__atomic_get_pages(void *addr)
1023 struct page *page;
1027 page = phys_to_page(phys);
1029 return (struct page **)page;
1032 static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
1047 struct page *page;
1051 addr = __alloc_simple_buffer(dev, size, gfp, &page);
1053 addr = __alloc_from_pool(size, &page);
1057 *handle = __iommu_create_mapping(dev, &page, size, attrs);
1082 struct page **pages;
1122 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1143 * free a page as defined by the above mapping.
1150 struct page **pages;
1176 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1364 * @page: page that buffer resides in
1365 * @offset: offset into page for start of buffer
1371 static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
1380 __dma_page_cpu_to_dev(page, offset, size, dir);
1388 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
1413 struct page *page;
1421 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1422 __dma_page_dev_to_cpu(page, offset, size, dir);
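
Lines 1364-1422 are the streaming map/unmap pair for the IOMMU case: arm_iommu_map_page() performs the CPU-to-device cache maintenance, allocates an IOVA and maps the page's physical address into the domain with iommu_map(); the unmap side looks the page back up via iommu_iova_to_phys() so it can do the device-to-CPU maintenance. A condensed sketch of the map side, assuming the pre-6.3 five-argument iommu_map(), a caller-supplied IOVA, and simplified protection flags (the function name is invented):

    #include <linux/dma-mapping.h>
    #include <linux/iommu.h>
    #include <linux/mm.h>

    static dma_addr_t iommu_map_page_sketch(struct iommu_domain *domain,
                                            dma_addr_t iova, struct page *page,
                                            unsigned long offset, size_t size)
    {
        size_t len = PAGE_ALIGN(size + offset);

        /* The real arm_iommu_map_page() first does the CPU-to-device cache
         * maintenance and allocates 'iova' from the mapping's bitmap. */

        /* Pre-6.3 iommu_map(); newer kernels take an extra gfp_t argument. */
        if (iommu_map(domain, iova, page_to_phys(page), len,
                      IOMMU_READ | IOMMU_WRITE) < 0)
            return DMA_MAPPING_ERROR;

        /* The device-visible address keeps the in-page offset. */
        return iova + offset;
    }
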
1491 struct page *page;
1497 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1498 __dma_page_dev_to_cpu(page, offset, size, dir);
1506 struct page *page;
1512 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1513 __dma_page_cpu_to_dev(page, offset, size, dir);