Lines matching refs:pages (each entry below is prefixed with its line number in the source file):

570 static void iommu_dma_free_pages_ext(struct page **pages, int count)
573 __free_page(pages[count]);
575 kvfree(pages);
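
Read together, lines 570-575 suggest the teardown helper frees every page in the array and then the array itself. A minimal reconstruction, assuming a backwards while (count--) walk (only lines 570, 573 and 575 appear in the matches):

static void iommu_dma_free_pages_ext(struct page **pages, int count)
{
	/* Assumed loop shape: walk the array backwards, one page at a time */
	while (count--)
		__free_page(pages[count]);
	/* The array itself came from kvzalloc() (line 589), so kvfree() it */
	kvfree(pages);
}
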
581 struct page **pages;
589 pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
590 if (!pages) {
594 /* IOMMU can map any pages, so highmem can also be used here */
597 /* It makes no sense to muck about with huge pages */
627 iommu_dma_free_pages_ext(pages, i);
632 pages[i++] = page++;
635 return pages;
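
Lines 581-635 outline the matching allocator: a kvzalloc'd pointer array filled page by page, with a full unwind on failure. Below is a simplified order-0 sketch; the real code between lines 597 and 632 evidently tries higher-order allocations first (hence the order mask passed as alloc_sizes >> PAGE_SHIFT at line 685, and the page++ at line 632 walking across a multi-page block), and that split logic is elided here:

static struct page **iommu_dma_alloc_pages_ext(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0;

	/* dev and order_mask drive NUMA placement and higher-order
	 * attempts in the original; this sketch ignores both. */
	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count--) {
		struct page *page = alloc_page(gfp);

		if (!page) {
			/* Unwind the i pages allocated so far (line 627) */
			iommu_dma_free_pages_ext(pages, i);
			return NULL;
		}
		pages[i++] = page++;	/* the ++ only matters for order > 0 */
	}
	return pages;
}
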
649 * but an IOMMU which supports smaller pages might not map the whole thing.
662 struct page **pages;
685 pages = iommu_dma_alloc_pages_ext(dev, count, alloc_sizes >> PAGE_SHIFT, gfp);
686 if (!pages) {
696 if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL)) {
711 vaddr = dma_common_pages_remap(pages, size, prot, __builtin_return_address(0));
727 iommu_dma_free_pages_ext(pages, count);
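
Pieced together, the call sites at 649-727 follow the usual vmalloc-backed DMA pattern: allocate the page array, describe it with a scatterlist, map it through the IOMMU, then stitch the pages into one contiguous kernel mapping. A hedged outline under those assumptions; the function name iommu_dma_alloc_remap_ext, the IOVA-mapping step, and the goto labels are all inventions, since those lines are not in the match list:

static void *iommu_dma_alloc_remap_ext(struct device *dev, size_t size,
		gfp_t gfp, pgprot_t prot, unsigned long alloc_sizes)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct sg_table sgt;
	struct page **pages;
	void *vaddr;

	pages = iommu_dma_alloc_pages_ext(dev, count,
					  alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_pages;

	/* ... map sgt into IOVA space here; not visible in the matches ... */

	/* Stitch the discontiguous pages into one kernel virtual range */
	vaddr = dma_common_pages_remap(pages, size, prot,
				       __builtin_return_address(0));
	if (!vaddr)
		goto out_free_sg;

	sg_free_table(&sgt);
	return vaddr;

out_free_sg:
	sg_free_table(&sgt);
out_free_pages:
	iommu_dma_free_pages_ext(pages, count);		/* line 727 */
	return NULL;
}
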
733 * @pages: Array representing buffer from __iommu_dma_alloc()
737 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
740 static int iommu_dma_mmap_ext(struct page **pages, size_t size, struct vm_area_struct *vma)
742 return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
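
Since vm_map_pages() itself caters for vma->vm_pgoff and rejects requests that run past the supplied page count, the helper at 740-742 can stay a one-liner; range checking is the core's job, not open-coded here. The caller at lines 1185-1188 below shows the intended use.
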
903 * aligned to IOMMU pages. Hence the need for this complicated bit of
1033 struct page *page = NULL, **pages = NULL;
1045 pages = dma_common_find_pages(cpu_addr);
1046 if (!pages) {
1055 if (pages) {
1056 iommu_dma_free_pages_ext(pages, count);
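
On the free side (lines 1033-1056) the page array is not stored anywhere; it is recovered from the kernel mapping itself. A sketch of that lookup-then-free dance, assuming the is_vmalloc_addr()/dma_common_free_remap() framing and the contiguous-page fallback that usually accompany dma_common_find_pages():

	struct page *page = NULL, **pages = NULL;
	int count = PAGE_ALIGN(size) >> PAGE_SHIFT;	/* assumed derivation */

	if (is_vmalloc_addr(cpu_addr)) {
		/* A remapped buffer's vm_struct remembers its page array */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);	/* assumption */
		dma_common_free_remap(cpu_addr, size);
	}

	if (pages)
		iommu_dma_free_pages_ext(pages, count);		/* line 1056 */
	else if (page)
		__free_pages(page, get_order(size));		/* assumption */
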
1185 struct page **pages = dma_common_find_pages(cpu_addr);
1187 if (pages) {
1188 return iommu_dma_mmap_ext(pages, size, vma);
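
Only buffers that went through the remap path have a page array recorded for dma_common_find_pages() to retrieve, so a NULL result here presumably falls through to a contiguous-buffer mmap path outside the match list.
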
1205 struct page **pages = dma_common_find_pages(cpu_addr);
1207 if (pages) {
1208 return sg_alloc_table_from_pages(sgt, pages, PAGE_ALIGN(size) >> PAGE_SHIFT, 0, size, GFP_KERNEL);
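
This get_sgtable path mirrors the allocation-side sg_alloc_table_from_pages() call at line 696: it rebuilds a scatterlist over the same page array so another driver can iterate the buffer. A minimal consumer sketch; dev, cpu_addr, dma_handle and size stand for whatever the caller got back from dma_alloc_attrs(), and the pr_debug() is purely illustrative:

	struct sg_table sgt;
	struct scatterlist *sg;
	int i, ret;

	ret = dma_get_sgtable(dev, &sgt, cpu_addr, dma_handle, size);
	if (ret)
		return ret;

	for_each_sgtable_sg(&sgt, sg, i)
		pr_debug("chunk %d: %u bytes\n", i, sg->length);

	sg_free_table(&sgt);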