/kernel/linux/linux-6.6/drivers/iommu/arm/arm-smmu/arm-smmu.c
  261: static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size, size_t granule, void *cookie, int reg)
  273:   iova = (iova >> 12) << 12;
  274:   iova |= cfg->asid;
  276:   arm_smmu_cb_write(smmu, idx, reg, iova);
  277:   iova += granule;
  280:   iova >>= 12;
  281:   iova |= (u64)cfg->asid << 48;
  283:   arm_smmu_cb_writeq(smmu, idx, reg, iova);
  284:   iova += granule >> 12;
  289: static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size, size_t granule, void *cookie, int reg)
  309: arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size, size_t granule, void *cookie)
  324: arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie)
  332: arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size, size_t granule, void *cookie)
  340: arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie)
  348: arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size, size_t granule, void *cookie)
  360: arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie)
  394:   unsigned long iova;  /* local in arm_smmu_context_fault() */
  1177: arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped)
  1195: arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova, size_t pgsize, size_t pgcount, struct iommu_iotlb_gather *iotlb_gather)
  1243: arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, dma_addr_t iova)
  1295: arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)

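The snippets at 273-284 are the same loop in two encodings: AArch32-format contexts pack the ASID into the low bits of a 32-bit VA, while AArch64-format contexts place it in bits [63:48] of a 64-bit value and step in units of VA >> 12. A standalone C model of the 64-bit branch, with a hypothetical tlbi_write() standing in for arm_smmu_cb_writeq() and illustrative values:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for arm_smmu_cb_writeq(): just show the value. */
static void tlbi_write(uint64_t val)
{
    printf("TLBIVAL <- 0x%016llx\n", (unsigned long long)val);
}

/* Model of the AArch64-format branch: VA >> 12 in the low bits,
 * ASID tagging the invalidation in bits [63:48]. */
static void tlb_inv_range_s1_v2(uint64_t iova, size_t size, size_t granule,
                                uint16_t asid)
{
    iova >>= 12;                   /* register takes VA >> 12 */
    iova |= (uint64_t)asid << 48;  /* ASID selects the address space */
    do {
        tlbi_write(iova);
        iova += granule >> 12;     /* step in VA >> 12 units */
    } while (size -= granule);
}

int main(void)
{
    tlb_inv_range_s1_v2(0x100000, 0x3000, 0x1000, 5); /* three TLBI writes */
    return 0;
}
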
/kernel/linux/linux-6.6/drivers/s390/cio/vfio_ccw_cp.c
  45: * @len: number of pages that should be pinned from @iova
  176: static bool page_array_iova_pinned(struct page_array *pa, u64 iova, u64 length)
  178:   u64 iova_pfn_start = iova >> PAGE_SHIFT;
  179:   u64 iova_pfn_end = (iova + length - 1) >> PAGE_SHIFT;
  364: * @iova: guest physical address of the target ccw chain
  375: static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
  391:   if (!ccw_is_chain(ccw) && !is_tic_within_range(ccw, iova, cnt))
  564:   u64 iova;  /* local in ccw_count_idaws() */
  575:   ret = vfio_dma_rw(vdev, ccw->cda, &iova, size, false);
  584:   iova …
  941: cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length)

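The pinned-range test at lines 176-179 is plain page-frame arithmetic: convert the byte range to an inclusive pfn interval, then compare pinned pages against it. A minimal userspace model (pa_iova stands in for the page_array fields; the real kernel walk differs in detail):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Does [iova, iova + length) overlap any pinned page? Models
 * page_array_iova_pinned(); pa_iova holds one iova per pinned page. */
static bool iova_range_pinned(const uint64_t *pa_iova, int pa_nr,
                              uint64_t iova, uint64_t length)
{
    uint64_t pfn_start = iova >> PAGE_SHIFT;
    uint64_t pfn_end = (iova + length - 1) >> PAGE_SHIFT;

    for (int i = 0; i < pa_nr; i++) {
        uint64_t pfn = pa_iova[i] >> PAGE_SHIFT;
        if (pfn >= pfn_start && pfn <= pfn_end)
            return true;
    }
    return false;
}

int main(void)
{
    uint64_t pinned[] = { 0x3000, 0x7000 };
    /* range ends in pfn 3, which is pinned -> prints 1 */
    printf("%d\n", iova_range_pinned(pinned, 2, 0x2800, 0x1000));
    return 0;
}
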
/device/soc/rockchip/rk3588/kernel/drivers/video/rockchip/rga3/rga_dma_buf.c
  369:   unsigned long shift, iova_len, iova = 0;  /* local in rga_iommu_dma_alloc_iova() */
  387:   iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
  389:   return (dma_addr_t)iova << shift;
  392: static void rga_iommu_dma_free_iova(struct rga_iommu_dma_cookie *cookie, dma_addr_t iova, size_t size)
  397:   free_iova_fast(iovad, iova_pfn(iovad, iova), …
  412:   iommu_unmap(buffer->domain, buffer->iova, buffer->size);
  413:   rga_iommu_dma_free_iova(buffer->cookie, buffer->iova, buffer->size);
  424:   if (virt_dma_buf->iova == 0)
  427:   iommu_unmap(virt_dma_buf->domain, virt_dma_buf->iova, virt_dma_buf->size);
  428:   rga_iommu_dma_free_iova(virt_dma_buf->cookie, virt_dma_buf->iova, virt_dma_buf->size);
  443:   dma_addr_t iova;  /* local in rga_iommu_map_virt_addr() */
  514:   dma_addr_t iova;  /* local in rga_viraddr_get_channel_info() */

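The allocator at lines 369-397 works in IOVA-page units, so byte sizes and DMA limits are shifted down by the granule before the call and the returned frame number is shifted back up into a bus address. A sketch of that arithmetic under those assumptions, with a trivial bump allocator standing in for alloc_iova_fast():

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

/* Trivial bump allocator standing in for alloc_iova_fast():
 * returns a page-frame number, or 0 on exhaustion. */
static unsigned long next_pfn = 0x100;

static unsigned long iova_alloc_pages(unsigned long nr, unsigned long limit_pfn)
{
    if (next_pfn + nr > limit_pfn)
        return 0;
    unsigned long pfn = next_pfn;
    next_pfn += nr;
    return pfn;
}

/* Model of rga_iommu_dma_alloc_iova(): round the byte size up to whole
 * IOVA pages, allocate in page units, convert back to a bus address. */
static dma_addr_t dma_alloc_iova(size_t size, uint64_t dma_limit,
                                 unsigned int shift)
{
    unsigned long iova_len = (size + (1UL << shift) - 1) >> shift;
    unsigned long pfn = iova_alloc_pages(iova_len, dma_limit >> shift);

    return (dma_addr_t)pfn << shift;
}

int main(void)
{
    printf("iova = 0x%llx\n",
           (unsigned long long)dma_alloc_iova(0x2345, ~0ULL, 12));
    return 0;
}
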
/kernel/linux/linux-5.10/drivers/fpga/dfl-afu.h
  48: * @iova: region IO virtual address.
  56:   u64 iova;  /* struct member */
  99:   … u64 user_addr, u64 length, u64 *iova);
  100: int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
  103:   … u64 iova, u64 size);

/kernel/linux/linux-6.6/drivers/fpga/dfl-afu.h
  48: * @iova: region IO virtual address.
  56:   u64 iova;  /* struct member */
  99:   … u64 user_addr, u64 length, u64 *iova);
  100: int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
  103:   … u64 iova, u64 size);

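Both kernel versions carry the same header: a DMA region records its iova and length, and a lookup such as afu_dma_region_find() has to decide whether a queried window lies inside a region. A hedged model of that containment test (the real driver keeps regions in an rbtree keyed by iova; the struct and helper names here are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Pared-down stand-in for struct dfl_afu_dma_region (the real one also
 * tracks user_addr, pages, and an rbtree node). */
struct dma_region {
    uint64_t iova;   /* region IO virtual address */
    uint64_t length; /* region size in bytes */
};

/* True if [iova, iova + size) lies entirely inside the region. */
static bool region_contains(const struct dma_region *r,
                            uint64_t iova, uint64_t size)
{
    return iova >= r->iova && iova + size <= r->iova + r->length;
}

int main(void)
{
    struct dma_region r = { .iova = 0x100000, .length = 0x10000 };
    return region_contains(&r, 0x104000, 0x1000) ? 0 : 1;
}
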
/kernel/linux/linux-6.6/drivers/gpu/drm/msm/msm_mmu.h
  14:   int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, …
  16:   int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
  30:   int (*handler)(void *arg, unsigned long iova, int flags, void *data);
  47: msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg, int (*handler)(void *arg, unsigned long iova, int flags, void *data))

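The ops table above routes map/unmap through per-MMU callbacks and lets the GPU driver install a fault handler; msm_mmu_set_fault_handler() only records the (arg, handler) pair for the fault path to call later. A minimal model of that hook (names are stand-ins, not the msm API):

#include <stdio.h>

/* Pared-down msm_mmu: the core stores (arg, handler) and invokes the
 * handler when the IOMMU reports a fault at some iova. */
struct mmu_model {
    void *arg;
    int (*handler)(void *arg, unsigned long iova, int flags, void *data);
};

static void set_fault_handler(struct mmu_model *mmu, void *arg,
                              int (*handler)(void *, unsigned long, int, void *))
{
    mmu->arg = arg;
    mmu->handler = handler;
}

static int gpu_fault(void *arg, unsigned long iova, int flags, void *data)
{
    (void)arg; (void)data;
    fprintf(stderr, "gpu fault at iova 0x%lx, flags 0x%x\n", iova, flags);
    return -1; /* not handled: let the core do its default reporting */
}

int main(void)
{
    struct mmu_model mmu;
    set_fault_handler(&mmu, NULL, gpu_fault);
    mmu.handler(mmu.arg, 0xdead000UL, 0, NULL); /* simulate a fault */
    return 0;
}
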
/kernel/linux/linux-6.6/drivers/gpu/drm/msm/msm_gem.h
  62:   uint64_t iova;  /* struct member */
  127:   … struct msm_gem_address_space *aspace, uint64_t *iova);
  129:   … struct msm_gem_address_space *aspace, uint64_t iova);
  131:   … struct msm_gem_address_space *aspace, uint64_t *iova, …
  134:   … struct msm_gem_address_space *aspace, uint64_t *iova);
  159:   … struct drm_gem_object **bo, uint64_t *iova);
  283:   uint64_t iova;  /* struct member */
  299:   uint64_t iova;  /* struct member */

/kernel/linux/linux-6.6/drivers/gpu/drm/msm/msm_gem.c
  409: * iova range) in addition to removing the iommu mapping. In the eviction
  410: * case (!close), we keep the iova allocated, but only remove the iommu
  468:   GEM_WARN_ON(vma->iova < range_start);
  469:   GEM_WARN_ON((vma->iova + obj->size) > range_end);
  534: get_and_pin_iova_range_locked(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova, u64 range_start, u64 range_end)
  549:   *iova = vma->iova;
  557: * get iova and pin it. Should have a matching put
  558: * limits iova to specified range (in pages)
  560: msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova, u64 range_start, u64 range_end)
  574: msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova)
  584: msm_gem_get_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova)
  624: msm_gem_set_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t iova)
  1313: msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags, struct msm_gem_address_space *aspace, struct drm_gem_object **bo, uint64_t *iova)

/kernel/linux/linux-6.6/drivers/gpu/drm/msm/msm_gpummu.c
  28: static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova, …
  32:   unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
  56: static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
  59:   unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;

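The a2xx GPU MMU is a flat table over a single VA window, so map and unmap both compute the slot as the page offset from the window base (lines 32 and 59). A model with illustrative constants (the real values are defined in msm_gpummu.c):

#include <stdint.h>
#include <stdio.h>

#define GPUMMU_VA_START  0x00100000ULL /* illustrative, not the real value */
#define GPUMMU_PAGE_SIZE 0x1000ULL

/* Flat table: the slot index is the page offset from the VA window start. */
static unsigned gpummu_idx(uint64_t iova)
{
    return (unsigned)((iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE);
}

int main(void)
{
    printf("idx = %u\n", gpummu_idx(0x00103000ULL)); /* -> 3 */
    return 0;
}
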
/kernel/linux/linux-5.10/drivers/staging/media/ipu3/ipu3-mmu.h
  30: int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova, …
  32: size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova, …
  34: size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova, …

/kernel/linux/linux-6.6/drivers/staging/media/ipu3/ipu3-mmu.h
  30: int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova, …
  32: size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova, …
  34: size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova, …

/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx5/mr.c
  56:   … u64 iova, int access_flags, …
  1129: set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, u64 length, int access_flags, u64 iova)
  1136:   mr->ibmr.iova = iova;
  1140: mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem, u64 iova)
  1144: * The alignment of iova has already been checked upon entering …
  1147:   umem->iova = iova;
  1151: alloc_cacheable_mr(struct ib_pd *pd, struct ib_umem *umem, u64 iova, int access_flags)
  1164:   page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
  1205: reg_create(struct ib_pd *pd, struct ib_umem *umem, u64 iova, int access_flags, unsigned int page_size, bool populate)
  1388: create_real_mr(struct ib_pd *pd, struct ib_umem *umem, u64 iova, int access_flags)
  1431: create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length, u64 iova, int access_flags, struct ib_udata *udata)
  1489: mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 iova, int access_flags, struct ib_udata *udata)
  1603: can_use_umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_umem *new_umem, int new_access_flags, u64 iova, unsigned long *page_size)
  1624: umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd, int access_flags, int flags, struct ib_umem *new_umem, u64 iova, unsigned long page_size)
  1671: mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 length, u64 iova, int new_access_flags, struct ib_pd *new_pd, struct ib_udata *udata)
  2477:   u64 iova = pi_mr->data_iova;  /* local in mlx5_ib_map_mtt_mr_sg_pi() */

/kernel/linux/linux-5.10/drivers/vhost/vdpa.c
  566: vhost_vdpa_map(struct vhost_vdpa *v, u64 iova, u64 size, u64 pa, u32 perm)
  574:   r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1, …
  580:   r = ops->dma_map(vdpa, iova, size, pa, perm);
  585:   r = iommu_map(v->domain, iova, pa, size, …
  590:   vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
  597: static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
  603:   vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);
  625:   u64 iova = msg->iova;  /* local in vhost_vdpa_process_iotlb_update() */

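Lines 574-590 show the ordering contract in vhost_vdpa_map(): record the translation in the software IOTLB first, push it to the backend (the device's dma_map op, or iommu_map() when there is none), and delete the IOTLB entry again if the backend fails. A model of that rollback pattern with stubbed helpers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stubs for vhost_iotlb_add_range()/vhost_iotlb_del_range() and for the
 * backend map (the device's dma_map op, or iommu_map() without one). */
static int iotlb_add_range(uint64_t first, uint64_t last)
{
    printf("iotlb add [0x%" PRIx64 ", 0x%" PRIx64 "]\n", first, last);
    return 0;
}

static void iotlb_del_range(uint64_t first, uint64_t last)
{
    printf("iotlb del [0x%" PRIx64 ", 0x%" PRIx64 "]\n", first, last);
}

static int backend_dma_map(uint64_t iova, uint64_t size, uint64_t pa)
{
    (void)iova; (void)size; (void)pa;
    return -1; /* pretend the backend rejected the mapping */
}

/* Model of vhost_vdpa_map(): software state first, hardware second,
 * and roll the software state back if the hardware step fails. */
static int vdpa_map(uint64_t iova, uint64_t size, uint64_t pa)
{
    int r = iotlb_add_range(iova, iova + size - 1);
    if (r)
        return r;

    r = backend_dma_map(iova, size, pa);
    if (r)
        iotlb_del_range(iova, iova + size - 1);

    return r;
}

int main(void)
{
    vdpa_map(0x10000, 0x2000, 0x40000);
    return 0;
}
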
/kernel/linux/linux-5.10/arch/arm/mm/dma-mapping.c
  1109:   dma_addr_t iova;  /* local in __alloc_iova() */
  1153:   iova = mapping->base + (mapping_size * i);
  1154:   iova += start << PAGE_SHIFT;
  1156:   return iova;
  1180: * The address range to be freed reaches into the iova …  (in __free_iova())
  1315:   dma_addr_t dma_addr, iova;  /* local in __iommu_create_mapping() */
  1322:   iova = dma_addr;
  1335:   ret = iommu_map(mapping->domain, iova, phys, len, …
  1339:   iova += len;
  1344:   iommu_unmap(mapping->domain, dma_addr, iova - dma_addr);
  1349: __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
  1583:   dma_addr_t iova, iova_base;  /* local in __map_sg_chunk() */
  1859:   dma_addr_t iova = handle & PAGE_MASK;  /* arm_coherent_iommu_unmap_page() */
  1883:   dma_addr_t iova = handle & PAGE_MASK;  /* arm_iommu_unmap_page() */
  1944:   dma_addr_t iova = dma_handle & PAGE_MASK;  /* arm_iommu_unmap_resource() */
  1959:   dma_addr_t iova = handle & PAGE_MASK;  /* arm_iommu_sync_single_for_cpu() */
  1973:   dma_addr_t iova = handle & PAGE_MASK;  /* arm_iommu_sync_single_for_device() */

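Lines 1153-1156 turn a bitmap hit back into a bus address: extension bitmap i covers the window base plus i times the per-bitmap mapping size, and start is the first free bit counted in pages. A model of just that arithmetic (values in main() are illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Model of the tail of __alloc_iova(): bitmap i covers
 * [base + i * mapping_size, ...) and `start` is a page index into it. */
static uint64_t iova_from_slot(uint64_t base, uint64_t mapping_size,
                               unsigned int i, unsigned long start)
{
    uint64_t iova = base + mapping_size * i;
    return iova + ((uint64_t)start << PAGE_SHIFT);
}

int main(void)
{
    /* bitmap 1 of a 256 MiB mapping, first free page at index 5 */
    printf("0x%" PRIx64 "\n",
           iova_from_slot(0x80000000ULL, 0x10000000ULL, 1, 5));
    return 0;
}
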
/kernel/linux/linux-6.6/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
  42:   u64 iova, pa;  /* local in otx2_qos_sq_aura_pool_init() */
  98:   iova = sq->sqb_ptrs[ptr];
  99:   pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
  100:   dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size, …
  121:   u64 iova, pa;  /* local in otx2_qos_sq_free_sqbs() */
  130:   iova = sq->sqb_ptrs[sqb];
  131:   pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
  132:   dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size, …

/kernel/linux/linux-6.6/drivers/iommu/iommu.c
  2355: phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
  2358:   return iova;
  2363:   return domain->ops->iova_to_phys(domain, iova);
  2367: static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, …
  2373:   unsigned long addr_merge = paddr | iova;
  2403:   if ((iova ^ paddr) & (pgsize_next - 1))
  2421: static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, …
  2429:   pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
  2431:   pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
  2432:            iova, &paddr, pgsize, count);
  2445: __iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
  2505: iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
  2526: __iommu_unmap_pages(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather)
  2539: __iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather)
  2593: iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
  2607: iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather)
  2615: iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot, gfp_t gfp)
  2697: report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags)

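iommu_pgsize() (lines 2367-2403) picks the largest hardware-supported page size compatible with both the remaining length and the common alignment of iova and paddr; the 6.6 version also counts how many such pages follow back to back. A userspace model of the selection step only, without the page-count extension, assuming 64-bit unsigned long and sizes below 2^63:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Largest supported page size that divides the alignment of iova|paddr
 * and does not exceed size. pgsize_bitmap has one bit per supported size. */
static size_t pick_pgsize(unsigned long pgsize_bitmap, unsigned long iova,
                          unsigned long paddr, size_t size)
{
    unsigned long addr_merge = iova | paddr;
    /* index of the biggest size that still fits into `size` */
    unsigned int idx = 63 - __builtin_clzl(size);

    if (addr_merge) {
        /* ...clamped by the combined alignment of the addresses */
        unsigned int align_idx = __builtin_ctzl(addr_merge);
        if (align_idx < idx)
            idx = align_idx;
    }

    /* keep only supported sizes <= 2^idx, then take the biggest */
    unsigned long mask = pgsize_bitmap & ((2UL << idx) - 1);
    assert(mask); /* the domain's minimum page size must always fit */

    return 1UL << (63 - __builtin_clzl(mask));
}

int main(void)
{
    /* 4K | 2M | 1G supported; alignment allows 2M here */
    unsigned long bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);
    printf("%#zx\n", pick_pgsize(bitmap, 0x40200000, 0x80200000, 0x400000));
    return 0;
}
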
/kernel/linux/linux-6.6/drivers/iommu/msm_iommu.c
  138: static void __flush_iotlb_range(unsigned long iova, size_t size, size_t granule, bool leaf, void *cookie)
  155:   iova &= TLBIVA_VA;
  156:   iova |= GET_CONTEXTIDR_ASID(iommu->base, …
  158:   SET_TLBIVA(iommu->base, master->num, iova);
  159:   iova += granule;
  170: static void __flush_iotlb_walk(unsigned long iova, size_t size, size_t granule, void *cookie)
  173:   __flush_iotlb_range(iova, size, granule, false, cookie);
  176: __flush_iotlb_page(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie)
  179:   __flush_iotlb_range(iova, granule, granule, true, cookie);
  473: static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, …
  489: msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova, size_t size)
  497: msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t pgsize, size_t pgcount, struct iommu_iotlb_gather *gather)

/kernel/linux/linux-6.6/arch/arm/mm/dma-mapping.c
  764:   dma_addr_t iova;  /* local in __alloc_iova() */
  808:   iova = mapping->base + (mapping_size * i);
  809:   iova += start << PAGE_SHIFT;
  811:   return iova;
  835: * The address range to be freed reaches into the iova …  (in __free_iova())
  970:   dma_addr_t dma_addr, iova;  /* local in __iommu_create_mapping() */
  977:   iova = dma_addr;
  990:   ret = iommu_map(mapping->domain, iova, phys, len, …
  995:   iova += len;
  1000:   iommu_unmap(mapping->domain, dma_addr, iova - dma_addr);
  1005: __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
  1193:   dma_addr_t iova, iova_base;  /* local in __map_sg_chunk() */
  1412:   dma_addr_t iova = handle & PAGE_MASK;  /* arm_iommu_unmap_page() */
  1475:   dma_addr_t iova = dma_handle & PAGE_MASK;  /* arm_iommu_unmap_resource() */
  1490:   dma_addr_t iova = handle & PAGE_MASK;  /* arm_iommu_sync_single_for_cpu() */
  1505:   dma_addr_t iova = handle & PAGE_MASK;  /* arm_iommu_sync_single_for_device() */

/kernel/linux/linux-5.10/drivers/gpu/drm/tegra/vic.c
  147:   (vic->falcon.firmware.iova + fce_bin_data_offset) …  (in vic_boot())
  234:   … vic->falcon.firmware.iova);  (in vic_exit())
  238:   … vic->falcon.firmware.iova);
  253:   dma_addr_t iova;  /* local in vic_load_firmware() */
  268:   virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
  272:   virt = tegra_drm_alloc(tegra, size, &iova);
  276:   vic->falcon.firmware.iova = iova;
  303:   dma_free_coherent(vic->dev, size, virt, iova);
  305:   tegra_drm_free(tegra, size, virt, iova);

/kernel/linux/linux-6.6/drivers/gpu/drm/tegra/vic.c
  105:   (vic->falcon.firmware.iova + fce_bin_data_offset) >> 8);  (in vic_boot())
  195:   … vic->falcon.firmware.iova);  (in vic_exit())
  199:   … vic->falcon.firmware.iova);
  216:   dma_addr_t iova;  /* local in vic_load_firmware() */
  235:   virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
  241:   virt = tegra_drm_alloc(tegra, size, &iova);
  249:   vic->falcon.firmware.iova = iova;
  297:   dma_free_coherent(vic->dev, size, virt, iova);
  299:   tegra_drm_free(tegra, size, virt, iova);

/kernel/linux/linux-6.6/drivers/gpu/drm/tegra/nvdec.c
  229:   … nvdec->falcon.firmware.iova);  (in nvdec_exit())
  233:   … nvdec->falcon.firmware.iova);
  248:   dma_addr_t iova;  /* local in nvdec_load_falcon_firmware() */
  263:   virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL);
  265:   err = dma_mapping_error(nvdec->dev, iova);
  269:   virt = tegra_drm_alloc(tegra, size, &iova);
  275:   nvdec->falcon.firmware.iova = iova;
  302:   dma_free_coherent(nvdec->dev, size, virt, iova);
  304:   tegra_drm_free(tegra, size, virt, iova);

/third_party/mesa3d/src/freedreno/decode/crashdec.c
  216:   uint64_t iova;  /* struct member */
  232:   } else if (startswith(line, " iova:")) {  (in decode_ringbuffer())
  233:   parseline(line, " iova: %" PRIx64, &ringbuffers[id].iova);
  242:   add_buffer(ringbuffers[id].iova, ringbuffers[id].size, …
  258:   uint64_t iova;  /* local in decode_gmu_log() */
  262:   if (startswith(line, " iova:")) {
  263:   parseline(line, " iova: %" PRIx64, &iova);
  293:   if (startswith(line, " iova:")) {  (in decode_gmu_hfi())
  434:   uint64_t iova = 0;  /* local in decode_bos() */

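crashdec scans the devcoredump text line by line and, on an "iova:" line, parses the hex address that follows (lines 232-263 repeat the pattern per section). A self-contained model using sscanf() in place of crashdec's own startswith()/parseline() helpers:

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/* Recognize an "  iova:" line from the devcoredump and parse the hex
 * address that follows. Leading-whitespace details are assumptions. */
static int parse_iova_line(const char *line, uint64_t *iova)
{
    if (strncmp(line, "  iova:", 7) != 0)
        return 0;
    return sscanf(line, " iova: %" SCNx64, iova) == 1;
}

int main(void)
{
    uint64_t iova;
    if (parse_iova_line("  iova: 000000000fc00000", &iova))
        printf("iova = 0x%" PRIx64 "\n", iova);
    return 0;
}
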
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/msm_gpummu.c
  28: static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova, …
  32:   unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
  56: static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
  59:   unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;

/kernel/linux/linux-5.10/drivers/iommu/msm_iommu.c
  139: static void __flush_iotlb_range(unsigned long iova, size_t size, size_t granule, bool leaf, void *cookie)
  156:   iova &= TLBIVA_VA;
  157:   iova |= GET_CONTEXTIDR_ASID(iommu->base, …
  159:   SET_TLBIVA(iommu->base, master->num, iova);
  160:   iova += granule;
  171: static void __flush_iotlb_walk(unsigned long iova, size_t size, size_t granule, void *cookie)
  174:   __flush_iotlb_range(iova, size, granule, false, cookie);
  177: static void __flush_iotlb_leaf(unsigned long iova, size_t size, size_t granule, void *cookie)
  180:   __flush_iotlb_range(iova, size, granule, true, cookie);
  183: __flush_iotlb_page(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie)
  486: msm_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t pa, size_t len, int prot, gfp_t gfp)
  500: msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t len, struct iommu_iotlb_gather *gather)

/kernel/linux/linux-5.10/drivers/iommu/iommu.c
  2331: phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
  2336:   return domain->ops->iova_to_phys(domain, iova);
  2372: static int __iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
  2376:   unsigned long orig_iova = iova;
  2397:   if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
  2398:     pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
  2399:            iova, &paddr, size, min_pagesz);
  2403:   pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
  2406:   size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
  2429: _iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
  2442: iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot)
  2450: iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot)
  2457: __iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather)
  2511: iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
  2525: iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather)
  2533: __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot, gfp_t gfp)
  2580: iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot)
  2588: iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot)
  2639: report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags)

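__iommu_map() (lines 2372-2406) first validates alignment against the domain's minimum page size, then maps the range chunk by chunk, letting iommu_pgsize() choose each chunk, and unmaps whatever was already mapped if a chunk fails, so no partial mapping is left behind. A model of that loop with stubbed callbacks (a real implementation would plug in something like the pick_pgsize() sketch shown earlier instead of the fixed chunk size used here):

#include <stddef.h>
#include <stdio.h>

/* Stub driver callbacks; a real domain would program page tables here. */
static int map_one(unsigned long iova, unsigned long paddr, size_t pgsize)
{
    printf("map   iova %#lx -> pa %#lx (%#zx)\n", iova, paddr, pgsize);
    return 0;
}

static void unmap_range(unsigned long iova, size_t size)
{
    printf("unmap iova %#lx (%#zx)\n", iova, size);
}

/* For the sketch, always use the minimum page; the kernel calls
 * iommu_pgsize() here to pick the biggest fitting size. */
static size_t pick(size_t size) { (void)size; return 0x1000; }

/* Model of the __iommu_map() loop: advance iova/paddr per chunk and
 * unwind the partial mapping on failure. */
static int map_model(unsigned long iova, unsigned long paddr, size_t size)
{
    unsigned long orig_iova = iova;
    size_t mapped = 0;
    int ret = 0;

    while (size) {
        size_t pgsize = pick(size);

        ret = map_one(iova, paddr, pgsize);
        if (ret)
            break;

        iova += pgsize;
        paddr += pgsize;
        size -= pgsize;
        mapped += pgsize;
    }

    if (ret && mapped)
        unmap_range(orig_iova, mapped); /* leave no partial mapping */

    return ret;
}

int main(void)
{
    return map_model(0x100000, 0x80000000UL, 0x3000);
}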