Lines Matching defs:arena

64 	struct pci_iommu_arena *arena;
69 not addition, so the required arena alignment is based on
71 particular systems can over-align the arena. */
78 arena = memblock_alloc_node(sizeof(*arena), align, nid);
79 if (!NODE_DATA(nid) || !arena) {
80 printk("%s: couldn't allocate arena from node %d\n"
83 arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
84 if (!arena)
86 sizeof(*arena));
89 arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid);
90 if (!NODE_DATA(nid) || !arena->ptes) {
91 printk("%s: couldn't allocate arena ptes from node %d\n"
94 arena->ptes = memblock_alloc(mem_size, align);
95 if (!arena->ptes)
102 arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
103 if (!arena)
105 sizeof(*arena));
106 arena->ptes = memblock_alloc(mem_size, align);
107 if (!arena->ptes)
113 spin_lock_init(&arena->lock);
114 arena->hose = hose;
115 arena->dma_base = base;
116 arena->size = window_size;
117 arena->next_entry = 0;
121 arena->align_entry = 1;
123 return arena;
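
These matches look like they come from the Alpha PCI IOMMU code (the pci_iommu_arena type, the hose->sg_pci / sg_isa windows, and alpha_mv.mv_pci_tbi all point at arch/alpha/kernel/pci_iommu.c). The lines above show the arena being created: the descriptor and its PTE table are allocated node-locally first, falling back to a system-wide memblock allocation, and then the base address, window size, lock, and allocation cursor are initialized. (Note the node-local PTE allocation requests sizeof(*arena) bytes while the fallback requests mem_size.) The simplified struct below is a minimal userspace model of only the state visible in these matches, not the real definition; the field types and the MODEL_PAGE_SHIFT constant are assumptions used by the sketches that follow.

#include <stddef.h>

#define MODEL_PAGE_SHIFT 13               /* Alpha uses 8 KB pages */
#define MODEL_PAGE_SIZE  (1UL << MODEL_PAGE_SHIFT)

/* Userspace stand-in for struct pci_iommu_arena, covering only the
 * fields referenced above (the real struct also carries a spinlock). */
struct pci_iommu_arena_model {
	void *hose;                /* owning PCI controller ("hose") */
	unsigned long *ptes;       /* one translation entry per arena page */
	unsigned long dma_base;    /* first bus address the window maps */
	unsigned long size;        /* window size in bytes */
	long next_entry;           /* next-fit allocation cursor */
	unsigned int align_entry;  /* minimum allocation alignment, in entries */
};
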
133 /* Must be called with the arena lock held */
135 iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
144 base = arena->dma_base >> PAGE_SHIFT;
148 ptes = arena->ptes;
149 nent = arena->size >> PAGE_SHIFT;
150 p = ALIGN(arena->next_entry, mask + 1);
172 alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
188 iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
195 spin_lock_irqsave(&arena->lock, flags);
198 ptes = arena->ptes;
199 mask = max(align, arena->align_entry) - 1;
200 p = iommu_arena_find_pages(dev, arena, n, mask);
202 spin_unlock_irqrestore(&arena->lock, flags);
213 arena->next_entry = p + n;
214 spin_unlock_irqrestore(&arena->lock, flags);
220 iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
225 p = arena->ptes + ofs;
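
iommu_arena_find_pages() and iommu_arena_alloc() above implement a next-fit search over the PTE table: start at next_entry rounded up to the requested alignment, look for n consecutive free entries, flush the IOMMU TLB (alpha_mv.mv_pci_tbi) and restart from the bottom if the search falls off the end, then advance next_entry past the winner while holding arena->lock; iommu_arena_free() simply clears the entries. Below is a rough userspace model of that search, reusing the struct above. The segment-boundary checks and the in-use marking the real allocator performs are omitted, and "free means a zero PTE" is an assumption.

/* Next-fit search for n free (zero) entries aligned to (mask + 1).
 * Returns an entry index or -1.  The real code also honours DMA
 * segment boundaries and must run under arena->lock. */
static long arena_find_pages(struct pci_iommu_arena_model *a,
			     long n, unsigned long mask)
{
	long nent = a->size >> MODEL_PAGE_SHIFT;
	long p = (a->next_entry + mask) & ~mask;    /* ALIGN(next_entry, mask+1) */
	int wrapped = 0;

	for (;;) {
		long i;

		if (p + n > nent) {
			if (wrapped++)
				return -1;          /* second pass failed: arena full */
			/* Wrap to the start; the real code flushes the IOMMU
			 * TLB here so stale entries cannot be reused. */
			p = 0;
			continue;
		}
		for (i = 0; i < n; i++)
			if (a->ptes[p + i])
				break;
		if (i == n)
			return p;                   /* found a free run */
		p = (p + i + 1 + mask) & ~mask;     /* skip past the busy entry */
	}
}

/* iommu_arena_free(): release a previously allocated run. */
static void arena_free(struct pci_iommu_arena_model *a, long ofs, long n)
{
	long i;

	for (i = 0; i < n; i++)
		a->ptes[ofs + i] = 0;
}
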
265 struct pci_iommu_arena *arena;
305 arena = hose->sg_pci;
306 if (!arena || arena->dma_base + arena->size - 1 > max_dma)
307 arena = hose->sg_isa;
314 dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
323 arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);
325 ret = arena->dma_base + dma_ofs * PAGE_SIZE;
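
The streaming map path above picks the hose's sg_pci arena unless that window exceeds the device's reachable DMA range (then it falls back to sg_isa), allocates enough entries to cover the buffer, writes mk_iommu_pte(paddr) into each one, and returns dma_base + dma_ofs * PAGE_SIZE plus the offset inside the first page. Here is a hedged model built on the helpers above; the PTE encoding is a placeholder, and the device alignment hint and error handling of the real function are left out.

/* Minimal model of the single-buffer map path.  Returns the bus
 * address for the buffer, or 0 on failure (the real code has a
 * dedicated error value and also supports direct and DAC windows). */
static unsigned long arena_map_single(struct pci_iommu_arena_model *a,
				      unsigned long paddr, size_t size)
{
	unsigned long offset = paddr & (MODEL_PAGE_SIZE - 1);
	long npages = (offset + size + MODEL_PAGE_SIZE - 1) >> MODEL_PAGE_SHIFT;
	long i, dma_ofs = arena_find_pages(a, npages, 0);

	if (dma_ofs < 0)
		return 0;

	paddr &= ~(MODEL_PAGE_SIZE - 1);
	for (i = 0; i < npages; i++, paddr += MODEL_PAGE_SIZE)
		a->ptes[dma_ofs + i] = paddr | 1;   /* stand-in for mk_iommu_pte() */

	a->next_entry = dma_ofs + npages;           /* iommu_arena_alloc() does this */
	return a->dma_base + ((unsigned long)dma_ofs << MODEL_PAGE_SHIFT) + offset;
}
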
386 struct pci_iommu_arena *arena;
407 arena = hose->sg_pci;
408 if (!arena || dma_addr < arena->dma_base)
409 arena = hose->sg_isa;
411 dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
412 if (dma_ofs * PAGE_SIZE >= arena->size) {
415 dma_addr, arena->dma_base, arena->size);
422 spin_lock_irqsave(&arena->lock, flags);
424 iommu_arena_free(arena, dma_ofs, npages);
429 if (dma_ofs >= arena->next_entry)
432 spin_unlock_irqrestore(&arena->lock, flags);
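
The unmap side above recovers the entry index as (dma_addr - dma_base) >> PAGE_SHIFT, rejects addresses outside the window, frees the entries under arena->lock, and, judging by the comparison with next_entry, issues an immediate IOMMU TLB flush only when the freed entries sit at or beyond the allocation cursor (entries behind the cursor are covered by the flush that happens on the next wrap-around). A short model of the index recovery and validation:

/* Model of the unmap path: translate a bus address back to an arena
 * entry, validate it, and release the run.  The real code flushes
 * the IOMMU TLB here when dma_ofs >= next_entry. */
static int arena_unmap(struct pci_iommu_arena_model *a,
		       unsigned long dma_addr, size_t size)
{
	unsigned long offset = dma_addr & (MODEL_PAGE_SIZE - 1);
	long dma_ofs = (long)((dma_addr - a->dma_base) >> MODEL_PAGE_SHIFT);
	long npages = (offset + size + MODEL_PAGE_SIZE - 1) >> MODEL_PAGE_SHIFT;

	if (dma_addr < a->dma_base ||
	    (unsigned long)dma_ofs << MODEL_PAGE_SHIFT >= a->size)
		return -1;                          /* not one of our mappings */

	arena_free(a, dma_ofs, npages);
	return 0;
}
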
564 struct scatterlist *out, struct pci_iommu_arena *arena,
605 dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
614 return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
617 out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
625 ptes = &arena->ptes[dma_ofs];
668 struct pci_iommu_arena *arena;
695 arena = hose->sg_pci;
696 if (!arena || arena->dma_base + arena->size - 1 > max_dma)
697 arena = hose->sg_isa;
700 arena = NULL;
709 if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
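
For scatter-gather, sg_fill() above maps a whole coalesced run with one arena allocation: it sizes the allocation to span every page of the run, stores dma_base + dma_ofs*PAGE_SIZE plus the leader's in-page offset as the output dma_address, and then fills the PTEs; when the allocation fails, the visible lines show it reclassifying the segments and calling itself again. The map_sg path chooses sg_pci or sg_isa with the same reachability test as the single-buffer path, and uses no arena at all when DAC addressing is allowed. The sketch below models one run under the simplifying assumption that its segments are physically contiguous, which the real code does not require.

struct sg_model {
	unsigned long paddr;        /* physical address of the segment */
	size_t len;
	unsigned long dma_address;  /* filled in by the mapping */
};

/* Rough model of sg_fill() for one coalesced run of segments. */
static int arena_map_sg_run(struct pci_iommu_arena_model *a,
			    struct sg_model *sg, int nents)
{
	unsigned long start = sg[0].paddr & ~(MODEL_PAGE_SIZE - 1);
	unsigned long end = sg[nents - 1].paddr + sg[nents - 1].len;
	long npages = (end - start + MODEL_PAGE_SIZE - 1) >> MODEL_PAGE_SHIFT;
	long i, dma_ofs = arena_find_pages(a, npages, 0);

	if (dma_ofs < 0)
		return -1;      /* the real code reclassifies and retries here */

	/* The run's bus address preserves the leader's offset in its page. */
	sg[0].dma_address = a->dma_base
		+ ((unsigned long)dma_ofs << MODEL_PAGE_SHIFT)
		+ (sg[0].paddr & (MODEL_PAGE_SIZE - 1));

	for (i = 0; i < npages; i++)
		a->ptes[dma_ofs + i] = (start + ((unsigned long)i << MODEL_PAGE_SHIFT)) | 1;

	a->next_entry = dma_ofs + npages;
	return 0;
}
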
746 struct pci_iommu_arena *arena;
758 arena = hose->sg_pci;
759 if (!arena || arena->dma_base + arena->size - 1 > max_dma)
760 arena = hose->sg_isa;
764 spin_lock_irqsave(&arena->lock, flags);
796 ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
797 iommu_arena_free(arena, ofs, npages);
807 if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
810 spin_unlock_irqrestore(&arena->lock, flags);
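
The scatter-gather unmap path above releases every run of the list under a single hold of arena->lock, remembers the highest bus address it freed, and flushes the IOMMU TLB once at the end only if that address reaches next_entry or beyond. A condensed model of that batching; the flush decision is returned to the caller here instead of performed.

/* Model of the scatter-gather unmap path.  Returns nonzero when the
 * caller would need an immediate IOMMU TLB flush (alpha_mv.mv_pci_tbi
 * in the real code), i.e. when the freed range reaches next_entry. */
static int arena_unmap_sg(struct pci_iommu_arena_model *a,
			  const struct sg_model *sg, int nents)
{
	unsigned long fend = a->dma_base;
	int i;

	for (i = 0; i < nents; i++) {
		unsigned long addr = sg[i].dma_address;
		unsigned long offset = addr & (MODEL_PAGE_SIZE - 1);
		long npages = (offset + sg[i].len + MODEL_PAGE_SIZE - 1)
				>> MODEL_PAGE_SHIFT;
		long ofs = (long)((addr - a->dma_base) >> MODEL_PAGE_SHIFT);

		arena_free(a, ofs, npages);
		if (addr + sg[i].len > fend)
			fend = addr + sg[i].len;
	}

	return (long)((fend - a->dma_base) >> MODEL_PAGE_SHIFT) >= a->next_entry;
}
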
822 struct pci_iommu_arena *arena;
832 /* Check that we have a scatter-gather arena that fits. */
834 arena = hose->sg_isa;
835 if (arena && arena->dma_base + arena->size - 1 <= mask)
837 arena = hose->sg_pci;
838 if (arena && arena->dma_base + arena->size - 1 <= mask)
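
The DMA-mask support check above ("a scatter-gather arena that fits") accepts a mask if either the ISA or the PCI arena's entire window, dma_base through dma_base + size - 1, lies at or below the mask. The same test, as a one-liner on the model:

/* An arena can back scatter-gather for a device only when its whole
 * bus-address window is reachable under the device's DMA mask. */
static int arena_fits_mask(const struct pci_iommu_arena_model *a,
			   unsigned long mask)
{
	return a != NULL && a->dma_base + a->size - 1 <= mask;
}
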
854 iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
860 if (!arena) return -EINVAL;
862 spin_lock_irqsave(&arena->lock, flags);
865 ptes = arena->ptes;
866 p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
868 spin_unlock_irqrestore(&arena->lock, flags);
878 arena->next_entry = p + pg_count;
879 spin_unlock_irqrestore(&arena->lock, flags);
885 iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
890 if (!arena) return -EINVAL;
892 ptes = arena->ptes;
899 iommu_arena_free(arena, pg_start, pg_count);
904 iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
911 if (!arena) return -EINVAL;
913 spin_lock_irqsave(&arena->lock, flags);
915 ptes = arena->ptes;
919 spin_unlock_irqrestore(&arena->lock, flags);
927 spin_unlock_irqrestore(&arena->lock, flags);
933 iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
938 if (!arena) return -EINVAL;
940 p = arena->ptes + pg_start;
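
Finally, iommu_reserve()/iommu_release()/iommu_bind()/iommu_unbind() above expose the arena to callers outside the DMA API (the Alpha AGP GART code, if memory serves): reserve claims a run of entries and marks them with a non-zero "reserved" value so the next-fit search and the hardware both treat them as unavailable, bind later fills them with real translations, unbind puts the reserved marker back, and release returns them to the free pool. The model below uses a made-up sentinel; the real code has a dedicated reserved-PTE constant whose name does not appear in these matches.

#define MODEL_RESERVED_PTE (~0UL)    /* placeholder for the real reserved marker */

/* Claim pg_count entries for later binding. */
static long arena_reserve(struct pci_iommu_arena_model *a,
			  long pg_count, unsigned long align_mask)
{
	long i, p = arena_find_pages(a, pg_count, align_mask);

	if (p < 0)
		return -1;
	for (i = 0; i < pg_count; i++)
		a->ptes[p + i] = MODEL_RESERVED_PTE;    /* busy, but not yet valid */
	a->next_entry = p + pg_count;
	return p;
}

/* Install real translations into a previously reserved run. */
static int arena_bind(struct pci_iommu_arena_model *a, long pg_start,
		      long pg_count, const unsigned long *phys_pages)
{
	long i;

	for (i = 0; i < pg_count; i++)
		if (a->ptes[pg_start + i] != MODEL_RESERVED_PTE)
			return -1;                      /* must reserve first */
	for (i = 0; i < pg_count; i++)
		a->ptes[pg_start + i] = phys_pages[i] | 1;  /* mk_iommu_pte() stand-in */
	return 0;
}

/* Drop the translations but keep the run reserved. */
static int arena_unbind(struct pci_iommu_arena_model *a, long pg_start,
			long pg_count)
{
	long i;

	for (i = 0; i < pg_count; i++)
		a->ptes[pg_start + i] = MODEL_RESERVED_PTE;
	return 0;
}
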