
Searched refs:iova (Results 201 - 225 of 534) sorted by relevance


/kernel/linux/linux-5.10/drivers/parisc/
sba_iommu.c
307 #define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
311 #define SBA_IOVP(ioc,iova) (iova)
482 * @iova: IO virtual address which was previously allocated.
488 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) in sba_free_range() argument
490 unsigned long iovp = SBA_IOVP(ioc, iova); in sba_free_range()
501 __func__, (uint) iova, size, in sba_free_range()
593 * @iova: IO Virtual Address mapped earlier
607 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_ argument
794 sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction, unsigned long attrs) sba_unmap_page() argument
[all...]
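The SBA_IOVP() macro quoted at source lines 307/311 above (and in the 6.6 copy below) masks an IOVA down to its offset within the IOMMU's virtual space, which sba_free_range() then turns into IO-pdir entries. A minimal user-space sketch of that masking step, assuming a made-up iovp_mask and 4 KiB IO pages (the real values come from the ioc at runtime):

#include <stdint.h>
#include <stdio.h>

#define IOVP_SHIFT 12                          /* assumed 4 KiB IO pages */

int main(void)
{
	/* Hypothetical values; the driver reads iovp_mask from the ioc. */
	uint64_t iovp_mask = 0x00ffffffffULL;
	uint64_t iova      = 0xf100452000ULL;

	uint64_t iovp = iova & iovp_mask;      /* SBA_IOVP(ioc, iova) */
	uint64_t pfn  = iovp >> IOVP_SHIFT;    /* IO page frame the driver releases */

	printf("iovp=%#llx io-pfn=%llu\n",
	       (unsigned long long)iovp, (unsigned long long)pfn);
	return 0;
}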
/kernel/linux/linux-6.6/drivers/parisc/
sba_iommu.c
311 #define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
315 #define SBA_IOVP(ioc,iova) (iova)
488 * @iova: IO virtual address which was previously allocated.
494 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) in sba_free_range() argument
496 unsigned long iovp = SBA_IOVP(ioc, iova); in sba_free_range()
507 __func__, (uint) iova, size, in sba_free_range()
599 * @iova: IO Virtual Address mapped earlier
613 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_ argument
801 sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction, unsigned long attrs) sba_unmap_page() argument
[all...]
/kernel/linux/linux-5.10/drivers/iommu/arm/arm-smmu/
arm-smmu-nvidia.c
177 unsigned long iova; in nvidia_smmu_context_fault_bank() local
186 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR); in nvidia_smmu_context_fault_bank()
190 "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n", in nvidia_smmu_context_fault_bank()
191 fsr, iova, fsynr, cbfrsynra, idx); in nvidia_smmu_context_fault_bank()
/kernel/linux/linux-6.6/drivers/infiniband/hw/mana/
mr.c
104 u64 iova, int access_flags, in mana_ib_reg_user_mr()
118 "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x", in mana_ib_reg_user_mr()
119 start, iova, length, access_flags); in mana_ib_reg_user_mr()
150 mr_params.gva.virtual_address = iova; in mana_ib_reg_user_mr()
103 mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length, u64 iova, int access_flags, struct ib_udata *udata) mana_ib_reg_user_mr() argument
/kernel/linux/linux-6.6/drivers/gpu/drm/tegra/
drm.h
11 #include <linux/iova.h>
126 void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *iova);
128 dma_addr_t iova);
/kernel/linux/linux-5.10/drivers/iommu/loongson/
iommu.c
994 static unsigned long *iommu_get_spte(spt_entry *entry, unsigned long iova, int level) in iommu_get_spte() argument
1003 pte = iommu_shadow_offset(entry, iova, i); in iommu_get_spte()
1234 static int loongson_iommu_map(struct iommu_domain *domain, unsigned long iova, in loongson_iommu_map() argument
1239 return iommu_map_page(priv, iova, pa, len, prot, GFP_KERNEL); in loongson_iommu_map()
1242 static size_t loongson_iommu_unmap(struct iommu_domain *domain, unsigned long iova, in loongson_iommu_unmap() argument
1247 return domain_unmap_page(priv, iova, size); in loongson_iommu_unmap()
1251 dma_addr_t iova) in loongson_iommu_iova_to_pa()
1262 if ((iova >= IOVA_START) && (iova < IOVA_END0)) { in loongson_iommu_iova_to_pa()
1263 tmpva = iova in loongson_iommu_iova_to_pa()
1250 loongson_iommu_iova_to_pa(struct iommu_domain *domain, dma_addr_t iova) loongson_iommu_iova_to_pa() argument
1306 loongson_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) loongson_iommu_iova_to_phys() argument
[all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/
msm_drv.h
283 struct msm_gem_address_space *aspace, uint64_t *iova);
285 struct msm_gem_address_space *aspace, uint64_t *iova,
288 struct msm_gem_address_space *aspace, uint64_t *iova);
326 struct drm_gem_object **bo, uint64_t *iova);
329 struct drm_gem_object **bo, uint64_t *iova);
/kernel/linux/linux-6.6/include/uapi/linux/
vfio.h
1377 __aligned_u64 iova; member
1398 * bitmap[(addr - iova)/page_size] & (1ULL << (addr % 64))
1416 __aligned_u64 iova; member
1461 * outside the valid iova range will return error.
1525 * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova. The vaddr
1529 * will result in user memory corruption and/or device misbehavior. iova and
1540 __u64 iova; /* IO virtual address */ member
1558 * field. No guarantee is made to the user that arbitrary unmaps of iova
1567 * vfio_bitmap.pgsize field, consecutively starting from iova offset. Bit set
1568 * indicates that the page at that offset from iova i
1586 __u64 iova; /* IO virtual address */ global() member
1644 __u64 iova; /* IO virtual address */ global() member
[all...]
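The dirty-bitmap formula quoted from vfio.h above (around source line 1398) reads more clearly as code. A sketch of how userspace might test a single page's bit, assuming the conventional layout of one bit per page_size unit packed into 64-bit words, with bit 0 of word 0 corresponding to iova itself; the helper name is illustrative, not part of the UAPI:

#include <stdbool.h>
#include <stdint.h>

/* True if the page containing 'addr' is marked dirty in 'bitmap', where the
 * bitmap covers a range starting at 'iova' with one bit per 'pgsize' bytes. */
static bool vfio_page_is_dirty(const uint64_t *bitmap, uint64_t iova,
			       uint64_t pgsize, uint64_t addr)
{
	uint64_t page = (addr - iova) / pgsize;    /* page index within the range */

	return bitmap[page / 64] & (1ULL << (page % 64));
}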
iommufd.h
190 * @iova: IOVA the mapping was placed at. If IOMMU_IOAS_MAP_FIXED_IOVA is set
194 * mapping will be established at iova, otherwise a suitable location based on
196 * iova.
198 * If IOMMU_IOAS_MAP_FIXED_IOVA is specified then the iova range must currently
208 __aligned_u64 iova; member
224 * dst_ioas_id. The src iova/length must exactly match a range used with
247 * @iova: IOVA to start the unmapping at
250 * Unmap an IOVA range. The iova/length must be a superset of a previously
258 __aligned_u64 iova; member
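For the IOMMU_IOAS_MAP_FIXED_IOVA comment quoted from iommufd.h above (around source line 190), here is a hedged sketch of how userspace could place a buffer at a fixed IOVA through an iommufd IOAS. It assumes <linux/iommufd.h> provides struct iommu_ioas_map, the IOMMU_IOAS_MAP ioctl and the flag names used below; the iommufd file descriptor and IOAS id are placeholders obtained elsewhere (e.g. via IOMMU_IOAS_ALLOC):

#include <linux/iommufd.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Map 'len' bytes at 'buf' into the IOAS at exactly 'iova'.
 * Returns 0 on success, -1 with errno set on failure. */
static int ioas_map_fixed(int iommufd, uint32_t ioas_id,
			  void *buf, size_t len, uint64_t iova)
{
	struct iommu_ioas_map map = {
		.size = sizeof(map),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA |
			 IOMMU_IOAS_MAP_READABLE |
			 IOMMU_IOAS_MAP_WRITEABLE,
		.ioas_id = ioas_id,
		.user_va = (uintptr_t)buf,
		.length = len,
		.iova = iova,		/* honoured because FIXED_IOVA is set */
	};

	return ioctl(iommufd, IOMMU_IOAS_MAP, &map);
}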
vduse.h
216 * @iova: start of the IOVA region
225 __u64 iova; member
233 /* De-register the userspace memory. Caller should set iova and size field. */
314 * @iova: IOVA range for updating
326 struct vduse_iova_range iova; member
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx4/
mr.c
418 u64 iova, u64 size, u32 access, int npages, in mlx4_mr_alloc_reserved()
421 mr->iova = iova; in mlx4_mr_alloc_reserved()
527 int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, in mlx4_mr_alloc() argument
537 err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size, in mlx4_mr_alloc()
590 u64 iova, u64 size, int npages, in mlx4_mr_rereg_mem_write()
599 mpt_entry->start = cpu_to_be64(iova); in mlx4_mr_rereg_mem_write()
649 mpt_entry->start = cpu_to_be64(mr->iova); in mlx4_mr_enable()
417 mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) mlx4_mr_alloc_reserved() argument
589 mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, u64 iova, u64 size, int npages, int page_shift, struct mlx4_mpt_entry *mpt_entry) mlx4_mr_rereg_mem_write() argument
/kernel/linux/linux-6.6/drivers/soc/apple/
rtkit.c
266 buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg); in apple_rtkit_common_rx_get_buffer()
270 buffer->size, &buffer->iova); in apple_rtkit_common_rx_get_buffer()
272 if (buffer->iova && in apple_rtkit_common_rx_get_buffer()
284 &buffer->iova, GFP_KERNEL); in apple_rtkit_common_rx_get_buffer()
296 buffer->iova); in apple_rtkit_common_rx_get_buffer()
305 buffer->iova = 0; in apple_rtkit_common_rx_get_buffer()
320 dma_free_coherent(rtk->dev, bfr->size, bfr->buffer, bfr->iova); in apple_rtkit_free_buffer()
324 bfr->iova = 0; in apple_rtkit_free_buffer()
/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx4/
mr.c
418 u64 iova, u64 size, u32 access, int npages, in mlx4_mr_alloc_reserved()
421 mr->iova = iova; in mlx4_mr_alloc_reserved()
527 int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, in mlx4_mr_alloc() argument
537 err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size, in mlx4_mr_alloc()
590 u64 iova, u64 size, int npages, in mlx4_mr_rereg_mem_write()
599 mpt_entry->start = cpu_to_be64(iova); in mlx4_mr_rereg_mem_write()
649 mpt_entry->start = cpu_to_be64(mr->iova); in mlx4_mr_enable()
417 mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) mlx4_mr_alloc_reserved() argument
589 mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, u64 iova, u64 size, int npages, int page_shift, struct mlx4_mpt_entry *mpt_entry) mlx4_mr_rereg_mem_write() argument
/kernel/linux/linux-6.6/drivers/gpu/drm/msm/adreno/
a6xx_hfi.c
196 msg.dbg_buffer_addr = (u32) gmu->debug.iova; in a6xx_hfi_send_gmu_init()
708 struct a6xx_hfi_queue_header *header, void *virt, u64 iova, in a6xx_hfi_queue_init()
720 header->iova = iova; in a6xx_hfi_queue_init()
761 hfi->iova + offset, 0); in a6xx_hfi_init()
766 hfi->iova + offset, gmu->legacy ? 4 : 1); in a6xx_hfi_init()
707 a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue, struct a6xx_hfi_queue_header *header, void *virt, u64 iova, u32 id) a6xx_hfi_queue_init() argument
/kernel/linux/linux-5.10/drivers/gpu/drm/etnaviv/
etnaviv_mmu.h
25 int (*map)(struct etnaviv_iommu_context *context, unsigned long iova,
27 size_t (*unmap)(struct etnaviv_iommu_context *context, unsigned long iova,
/kernel/linux/linux-6.6/drivers/gpu/drm/etnaviv/
etnaviv_mmu.h
25 int (*map)(struct etnaviv_iommu_context *context, unsigned long iova,
27 size_t (*unmap)(struct etnaviv_iommu_context *context, unsigned long iova,
/third_party/mesa3d/src/freedreno/drm/virtio/
msm_proto.h
146 * set the BO's iova (to avoid extra guest -> host trip)
153 uint64_t iova; member
163 * Set the buffer iova (for imported BOs). Also used to release the iova
169 uint64_t iova; member
/kernel/linux/linux-6.6/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_common.c
1043 aq->cq.base = cq->cqe->iova; in otx2_cq_init()
1191 u64 iova, pa; in otx2_sq_free_sqbs() local
1200 iova = sq->sqb_ptrs[sqb]; in otx2_sq_free_sqbs()
1201 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); in otx2_sq_free_sqbs()
1202 dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size, in otx2_sq_free_sqbs()
1212 u64 iova, int size) in otx2_free_bufs()
1217 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); in otx2_free_bufs()
1223 dma_unmap_page_attrs(pfvf->dev, iova, size, in otx2_free_bufs()
1235 u64 iova; in otx2_free_aura_ptr() local
1250 iova in otx2_free_aura_ptr()
1211 otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool, u64 iova, int size) otx2_free_bufs() argument
[all...]
/kernel/linux/linux-6.6/drivers/vdpa/vdpa_user/
vduse_dev.c
78 unsigned long iova; member
314 msg.req.iova.start = start; in vduse_dev_update_iotlb()
315 msg.req.iova.last = last; in vduse_dev_update_iotlb()
829 unsigned long iova; in vduse_dev_alloc_coherent() local
834 (dma_addr_t *)&iova, flag, attrs); in vduse_dev_alloc_coherent()
838 *dma_addr = (dma_addr_t)iova; in vduse_dev_alloc_coherent()
995 u64 iova, u64 size) in vduse_dev_dereg_umem()
1008 if (dev->umem->iova != iova || size != dev->domain->bounce_size) in vduse_dev_dereg_umem()
1026 u64 iova, u6 in vduse_dev_reg_umem()
994 vduse_dev_dereg_umem(struct vduse_dev *dev, u64 iova, u64 size) vduse_dev_dereg_umem() argument
1025 vduse_dev_reg_umem(struct vduse_dev *dev, u64 iova, u64 uaddr, u64 size) vduse_dev_reg_umem() argument
[all...]
/kernel/linux/linux-5.10/drivers/infiniband/sw/rxe/
rxe_req.c
37 wqe->iova += qp->mtu; in retry_first_write_send()
65 wqe->iova = (mask & WR_ATOMIC_MASK) ? in req_retry()
89 wqe->iova += npsn * qp->mtu; in req_retry()
410 reth_set_va(pkt, wqe->iova); in init_req_packet()
421 atmeth_set_va(pkt, wqe->iova); in init_req_packet()
622 rmr->iova = wqe->wr.wr.reg.mr->iova; in rxe_requester()
/kernel/linux/linux-6.6/drivers/iommu/arm/arm-smmu-v3/
arm-smmu-v3.c
1616 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n", in arm_smmu_handle_ppr()
1726 arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size, in arm_smmu_atc_inv_to_cmd() argument
1760 page_start = iova >> inval_grain_shift; in arm_smmu_atc_inv_to_cmd()
1761 page_end = (iova + size - 1) >> inval_grain_shift; in arm_smmu_atc_inv_to_cmd()
1810 unsigned long iova, size_t size) in arm_smmu_atc_inv_domain()
1838 arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd); in arm_smmu_atc_inv_domain()
1882 unsigned long iova, size_t size, in __arm_smmu_tlb_inv_range()
1887 unsigned long end = iova + size, num_pages = 0, tg = 0; in __arm_smmu_tlb_inv_range()
1919 while (iova < end) { in __arm_smmu_tlb_inv_range()
1945 cmd->tlbi.addr = iova; in __arm_smmu_tlb_inv_range()
1809 arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid, unsigned long iova, size_t size) arm_smmu_atc_inv_domain() argument
1881 __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd, unsigned long iova, size_t size, size_t granule, struct arm_smmu_domain *smmu_domain) __arm_smmu_tlb_inv_range() argument
1952 arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size, size_t granule, bool leaf, struct arm_smmu_domain *smmu_domain) arm_smmu_tlb_inv_range_domain() argument
1979 arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid, size_t granule, bool leaf, struct arm_smmu_domain *smmu_domain) arm_smmu_tlb_inv_range_asid() argument
1995 arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) arm_smmu_tlb_inv_page_nosync() argument
2005 arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, size_t granule, void *cookie) arm_smmu_tlb_inv_walk() argument
2482 arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) arm_smmu_map_pages() argument
2494 arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova, size_t pgsize, size_t pgcount, struct iommu_iotlb_gather *gather) arm_smmu_unmap_pages() argument
2529 arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) arm_smmu_iova_to_phys() argument
[all...]
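The range arithmetic quoted from arm-smmu-v3.c above (source lines 1760-1761) rounds an IOVA range out to whole invalidation grains before building the ATC invalidation command. A standalone sketch of that calculation with made-up values; the grain shift of 12 is only an example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int inval_grain_shift = 12;          /* example: 4 KiB grains */
	uint64_t iova = 0x40001800, size = 0x2000;    /* range crosses a grain boundary */

	uint64_t page_start = iova >> inval_grain_shift;
	uint64_t page_end   = (iova + size - 1) >> inval_grain_shift;

	/* Prints: grains 0x40001..0x40003 (3 grains to invalidate) */
	printf("grains %#llx..%#llx (%llu grains to invalidate)\n",
	       (unsigned long long)page_start, (unsigned long long)page_end,
	       (unsigned long long)(page_end - page_start + 1));
	return 0;
}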
/kernel/linux/linux-6.6/drivers/gpu/drm/msm/dsi/
dsi.h
129 int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
130 int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
/third_party/mesa3d/src/freedreno/vulkan/
tu_cs_breadcrumbs.c
248 cs, device->global_bo->iova + gb_offset(breadcrumb_gpu_sync_seqno)); in tu_cs_emit_sync_breadcrumb()
256 cs, device->global_bo->iova + gb_offset(breadcrumb_cpu_sync_seqno)); in tu_cs_emit_sync_breadcrumb()
/kernel/linux/linux-6.6/drivers/iommu/
mtk_iommu.c
205 * The IOMMU HW may support 16GB iova. In order to balance the IOVA ranges,
206 * different masters will be put in different iova ranges, for example vcodec
210 * Here list the iova ranges this SoC supports and which larbs/ports are in
213 * 16GB iova all use one pgtable, but each a region is a iommu group.
308 #define MTK_IOMMU_TLB_ADDR(iova) ({ \
309 dma_addr_t _addr = iova; \
389 static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size, in mtk_iommu_tlb_flush_range_sync() argument
431 writel_relaxed(MTK_IOMMU_TLB_ADDR(iova), base + REG_MMU_INVLD_START_A); in mtk_iommu_tlb_flush_range_sync()
432 writel_relaxed(MTK_IOMMU_TLB_ADDR(iova + size - 1), in mtk_iommu_tlb_flush_range_sync()
511 "fault type=0x%x iova in mtk_iommu_isr()
779 mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) mtk_iommu_map() argument
793 mtk_iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t pgsize, size_t pgcount, struct iommu_iotlb_gather *gather) mtk_iommu_unmap() argument
820 mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova, size_t size) mtk_iommu_sync_map() argument
828 mtk_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) mtk_iommu_iova_to_phys() argument
[all...]
/device/soc/rockchip/rk3588/kernel/drivers/video/rockchip/rga3/include/
rga_drv.h
51 #include <linux/iova.h>
147 dma_addr_t iova; member
182 dma_addr_t iova; member
