Lines Matching defs:dma_addr

The matches below appear to come from the s390 PCI DMA translation code (arch/s390/pci/pci_dma.c); each entry carries its line number in that file.
99 unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
104 rtx = calc_rtx(dma_addr);
109 sx = calc_sx(dma_addr);
114 px = calc_px(dma_addr);
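
Lines 99-114 are the walk through the three-level DMA translation table: calc_rtx(), calc_sx() and calc_px() carve the region-table, segment-table and page-table index out of the DMA address. A minimal sketch of those helpers, assuming the constants from arch/s390/include/asm/pci_dma.h (4 KB pages, 2048-entry region and segment tables, 256-entry page tables):

    static inline unsigned int calc_rtx(dma_addr_t addr)
    {
            return (addr >> 31) & 0x7ff;    /* region-table index */
    }

    static inline unsigned int calc_sx(dma_addr_t addr)
    {
            return (addr >> 20) & 0x7ff;    /* segment-table index */
    }

    static inline unsigned int calc_px(dma_addr_t addr)
    {
            return (addr >> 12) & 0xff;     /* page-table index, PAGE_SHIFT == 12 */
    }

dma_walk_cpu_trans() descends from rto through the segment table to the page table with these indices and returns a pointer to the page-table entry for dma_addr, allocating missing table levels on the way down.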
134 dma_addr_t dma_addr, size_t size, int flags)
152 entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
159 dma_addr += PAGE_SIZE;
167 dma_addr -= PAGE_SIZE;
168 entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
179 static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
200 ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
219 dma_addr_t dma_addr, size_t size, int flags)
223 rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
227 rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
229 __dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);
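
Lines 219-229 show how dma_update_trans() sequences the two halves: first the CPU-visible table update, then the TLB purge. If the purge fails after a mapping was established, the entries just written are invalidated again (line 229), so the table never advertises a translation the device could not be told about. The surrounding function reads approximately:

    rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
    if (rc)
            return rc;

    rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
    if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
            /* map landed in the table but the device TLB is stale:
             * take the mapping back out */
            __dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);

    return rc;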
303 static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
308 offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
342 dma_addr_t dma_addr;
347 dma_addr = dma_alloc_address(dev, nr_pages);
348 if (dma_addr == DMA_MAPPING_ERROR) {
359 ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
364 return dma_addr + (offset & ~PAGE_MASK);
367 dma_free_address(dev, dma_addr, nr_pages);
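
Lines 342-367 are the map_page path: allocate a page-aligned IOVA range, program the translation, and return the handle with the sub-page offset folded back in (line 364); if programming the table fails, the range goes straight back to the allocator (line 367). Condensed:

    dma_addr = dma_alloc_address(dev, nr_pages);
    if (dma_addr == DMA_MAPPING_ERROR)
            return DMA_MAPPING_ERROR;       /* IOVA space exhausted */

    ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
    if (ret) {
            dma_free_address(dev, dma_addr, nr_pages);
            return DMA_MAPPING_ERROR;
    }

    /* the handle is page aligned; restore the offset within the first page */
    return dma_addr + (offset & ~PAGE_MASK);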
374 static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
381 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
382 dma_addr = dma_addr & PAGE_MASK;
383 ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
387 zpci_err_dma(ret, dma_addr);
392 dma_free_address(dev, dma_addr, npages);
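
Lines 374-392 mirror that on unmap: iommu_num_pages() (line 381) rounds the possibly unaligned handle/size pair up to whole pages, the handle is masked down to its page boundary (line 382), and the range is invalidated before being returned to the allocator. Note the ordering on failure: if invalidation fails (line 387), the range is reported and leaked rather than freed, so it cannot be reallocated while the device may still hold a translation for it. Condensed:

    npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
    dma_addr &= PAGE_MASK;
    ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
                           ZPCI_PTE_INVALID);
    if (ret) {
            zpci_err_dma(ret, dma_addr);
            return;                 /* leak the range on purpose */
    }
    dma_free_address(dev, dma_addr, npages);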
441 dma_addr_t dma_addr_base, dma_addr;
451 dma_addr = dma_addr_base;
455 for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
457 ret = __dma_update_trans(zdev, pa, dma_addr,
462 dma_addr += s->offset + s->length;
474 dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
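
Lines 441-474 belong to the scatter-gather helper, which maps a whole sg list into one contiguous IOVA range: dma_addr starts at dma_addr_base (line 451), the loop condition on line 455 runs until the range is covered, and each element advances the cursor by s->offset + s->length (line 462). On failure, line 474 invalidates exactly the prefix that was mapped, dma_addr - dma_addr_base bytes. A condensed sketch, with a single TLB purge for the whole range at the end as in the source:

    dma_addr = dma_addr_base;
    for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
            pa = page_to_phys(sg_page(s));
            ret = __dma_update_trans(zdev, pa, dma_addr,
                                     s->offset + s->length, flags);
            if (ret)
                    goto unmap;
            dma_addr += s->offset + s->length;
    }
    ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
    if (ret)
            goto unmap;
    return 0;

    unmap:
    /* undo exactly the prefix that was mapped, then free the IOVA range */
    dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
                     ZPCI_PTE_INVALID);
    dma_free_address(dev, dma_addr_base, nr_pages);
    return ret;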