Lines Matching refs:domain
47 struct iommu_domain *domain;
83 static int iommu_attach_device_ext(struct iommu_domain *domain, struct device *dev);
84 static int iommu_attach_group_ext(struct iommu_domain *domain, struct iommu_group *group);
85 static void iommu_detach_group_ext(struct iommu_domain *domain, struct iommu_group *group);
99 * Use a function instead of an array here because the domain-type is a
135 pr_info("Default domain type: %s %s\n", iommu_domain_type_str(iommu_def_domain_type),
261 * Try to allocate a default domain - needs support from the
725 struct iommu_domain *domain = group->default_domain;
731 if (!domain || domain->type != IOMMU_DOMAIN_DMA) {
735 BUG_ON(!domain->pgsize_bitmap);
737 pg_size = 1UL << __ffs(domain->pgsize_bitmap);
747 if (domain->ops->apply_resv_region) {
748 domain->ops->apply_resv_region(dev, domain, entry);
761 phys_addr = iommu_iova_to_phys(domain, addr);
766 ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
773 iommu_flush_iotlb_all(domain);
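The block above (lines 725-773) identity-maps a group's reserved regions into its default DMA domain, skipping pages that already translate, then flushes the IOTLB. A minimal, hedged sketch of the same pattern built only from the public helpers visible here (iommu_iova_to_phys(), iommu_map(), iommu_flush_iotlb_all()); the helper name, fixed prot flags, and region bounds are illustrative, not taken from the source:

#include <linux/bitops.h>
#include <linux/iommu.h>

/* Illustrative: identity-map [start, start + length) into @domain,
 * skipping pages that already have a translation installed. */
static int demo_direct_map_region(struct iommu_domain *domain,
				  dma_addr_t start, size_t length)
{
	size_t pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	dma_addr_t addr;
	int ret;

	for (addr = start; addr < start + length; addr += pg_size) {
		if (iommu_iova_to_phys(domain, addr))
			continue;	/* already mapped */

		ret = iommu_map(domain, addr, addr, pg_size,
				IOMMU_READ | IOMMU_WRITE);
		if (ret)
			return ret;
	}

	iommu_flush_iotlb_all(domain);
	return 0;
}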
781 static bool iommu_is_attach_deferred(struct iommu_domain *domain, struct device *dev)
783 if (domain->ops->is_attach_deferred) {
784 return domain->ops->is_attach_deferred(domain, dev);
844 if (group->domain && !iommu_is_attach_deferred(group->domain, dev)) {
845 ret = iommu_attach_device_ext(group->domain, dev);
1207 struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1209 if (!domain || !domain->ops->page_response) {
1255 ret = domain->ops->page_response(dev, evt, msg);
1510 "Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
1520 if (!group->domain) {
1521 group->domain = dom;
1698 "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
1718 /* Ask for default domain requirements of all devices in the group */
1730 struct iommu_domain *domain = data;
1733 if (!iommu_is_attach_deferred(domain, dev)) {
1734 ret = iommu_attach_device_ext(domain, dev);
1747 struct iommu_domain *domain = data;
1749 if (domain->ops->probe_finalize) {
1750 domain->ops->probe_finalize(dev);
1782 * This code-path does not allocate the default domain when
1798 /* Try to allocate default domain */
1912 * iommu_set_fault_handler() - set a fault handler for an iommu domain
1913 * @domain: iommu domain
1923 void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token)
1925 BUG_ON(!domain);
1927 domain->handler = handler;
1928 domain->handler_token = token;
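iommu_set_fault_handler() (line 1923) just stores the callback and token on the domain (lines 1927-1928); report_iommu_fault() later invokes that callback with the same token. A hedged sketch of a consumer-side handler matching the iommu_fault_handler_t signature; the demo_* names are illustrative:

#include <linux/device.h>
#include <linux/iommu.h>

/* Illustrative handler: log the fault and report it as handled. */
static int demo_fault_handler(struct iommu_domain *domain,
			      struct device *dev, unsigned long iova,
			      int flags, void *token)
{
	dev_err(dev, "iommu fault at iova %#lx (flags %#x)\n", iova, flags);
	return 0;	/* 0 tells the caller the fault was handled */
}

/* Registration, typically done right after allocating the domain. */
static void demo_register_handler(struct iommu_domain *domain, void *priv)
{
	iommu_set_fault_handler(domain, demo_fault_handler, priv);
}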
1934 struct iommu_domain *domain;
1940 domain = bus->iommu_ops->domain_alloc(type);
1941 if (!domain) {
1945 domain->ops = bus->iommu_ops;
1946 domain->type = type;
1948 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
1950 return domain;
1959 void iommu_domain_free(struct iommu_domain *domain)
1961 domain->ops->domain_free(domain);
1965 static int iommu_attach_device_ext(struct iommu_domain *domain, struct device *dev)
1969 if (unlikely(domain->ops->attach_dev == NULL)) {
1973 ret = domain->ops->attach_dev(domain, dev);
1980 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
2000 ret = iommu_attach_group_ext(domain, group);
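Lines 1934-2000 cover domain allocation and the attach path that routes a single device through its group. A hedged sketch of the usual consumer flow; it assumes the public iommu_domain_alloc() wrapper around the allocator at line 1940, and the device pointer and demo_* name are illustrative:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iommu.h>

/* Illustrative consumer: allocate an unmanaged domain on the device's
 * bus, attach the device, and tear everything down again. */
static int demo_domain_lifecycle(struct device *dev)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* ... iommu_map()/iommu_unmap() against @domain here ... */

	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}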
2071 int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev, void __user *uinfo)
2077 if (unlikely(!domain->ops->cache_invalidate)) {
2125 return domain->ops->cache_invalidate(domain, dev, &inv_info);
2192 int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev, void __user *udata)
2197 if (unlikely(!domain->ops->sva_bind_gpasid)) {
2206 return domain->ops->sva_bind_gpasid(domain, dev, &data);
2210 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, ioasid_t pasid)
2212 if (unlikely(!domain->ops->sva_unbind_gpasid)) {
2216 return domain->ops->sva_unbind_gpasid(dev, pasid);
2220 int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, void __user *udata)
2225 if (unlikely(!domain->ops->sva_bind_gpasid)) {
2234 return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
2238 static void __iommu_detach_device(struct iommu_domain *domain, struct device *dev)
2240 if (iommu_is_attach_deferred(domain, dev)) {
2244 if (unlikely(domain->ops->detach_dev == NULL)) {
2248 domain->ops->detach_dev(domain, dev);
2252 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
2268 iommu_detach_group_ext(domain, group);
2278 struct iommu_domain *domain;
2286 domain = group->domain;
2290 return domain;
2296 * guarantees that the group and its default domain are valid and correct.
2315 struct iommu_domain *domain = data;
2317 return iommu_attach_device_ext(domain, dev);
2320 static int iommu_attach_group_ext(struct iommu_domain *domain, struct iommu_group *group)
2324 if (group->default_domain && group->domain != group->default_domain) {
2328 ret = iommu_group_for_each_dev_ext(group, domain, iommu_group_do_attach_device);
2330 group->domain = domain;
2336 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
2341 ret = iommu_attach_group_ext(domain, group);
2350 struct iommu_domain *domain = data;
2352 __iommu_detach_device(domain, dev);
2357 static void iommu_detach_group_ext(struct iommu_domain *domain, struct iommu_group *group)
2362 iommu_group_for_each_dev_ext(group, domain, iommu_group_do_detach_device);
2363 group->domain = NULL;
2367 if (group->domain == group->default_domain) {
2371 /* Detach by re-attaching to the default domain */
2376 group->domain = group->default_domain;
2380 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
2383 iommu_detach_group_ext(domain, group);
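iommu_attach_group()/iommu_detach_group() operate on a whole group, and detaching really means re-attaching the default domain (lines 2371-2376). A hedged sketch of attaching a domain to the group a device belongs to; the demo_* helper is illustrative and error handling is minimal:

#include <linux/errno.h>
#include <linux/iommu.h>

/* Illustrative: attach @domain to the iommu group that @dev is in. */
static int demo_attach_group_of(struct iommu_domain *domain,
				struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);
	int ret;

	if (!group)
		return -ENODEV;

	ret = iommu_attach_group(domain, group);
	iommu_group_put(group);
	return ret;
}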
2388 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
2390 if (unlikely(domain->ops->iova_to_phys == NULL)) {
2394 return domain->ops->iova_to_phys(domain, iova);
2398 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size,
2406 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);
2420 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
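iommu_pgsize() (starting at line 2398) picks the largest hardware page size that fits both the remaining size and the alignment of iova/paddr, using the two bitmap masks shown above. A simplified, hedged sketch of just the size-limiting step; it deliberately ignores the alignment and contiguous-count handling, and the helper name is illustrative:

#include <linux/bits.h>
#include <linux/bitops.h>

/* Simplified: largest page size in @pgsize_bitmap that is <= @size.
 * Example: bitmap = SZ_4K | SZ_2M | SZ_1G, size = 3 MiB -> picks SZ_2M. */
static unsigned long demo_pick_pgsize(unsigned long pgsize_bitmap, size_t size)
{
	unsigned long pgsizes = pgsize_bitmap & GENMASK(__fls(size), 0);

	return pgsizes ? BIT(__fls(pgsizes)) : 0;
}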
2448 static int iommu_map_pages_ext(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size,
2451 const struct iommu_ops *ops = domain->ops;
2455 pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
2460 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, gfp, mapped);
2462 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
2469 static int iommu_map_ext(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot,
2472 const struct iommu_ops *ops = domain->ops;
2479 if (unlikely(!(ops->map || ops->map_pages) || domain->pgsize_bitmap == 0UL)) {
2483 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) {
2488 min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2504 ret = iommu_map_pages_ext(domain, iova, paddr, size, prot, gfp, &mapped);
2521 iommu_unmap(domain, orig_iova, orig_size - size);
2529 static int _iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot,
2532 const struct iommu_ops *ops = domain->ops;
2535 ret = iommu_map_ext(domain, iova, paddr, size, prot, gfp);
2537 ops->iotlb_sync_map(domain, iova, size);
2543 int iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot)
2546 return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
2550 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot)
2552 return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
2556 static size_t iommu_unmap_pages_ext(struct iommu_domain *domain, unsigned long iova, size_t size,
2559 const struct iommu_ops *ops = domain->ops;
2562 pgsize = iommu_pgsize(domain, iova, iova, size, &count);
2563 return ops->unmap_pages ? ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather)
2564 : ops->unmap(domain, iova, pgsize, iotlb_gather);
2567 static size_t iommu_unmap_ext(struct iommu_domain *domain, unsigned long iova, size_t size,
2570 const struct iommu_ops *ops = domain->ops;
2575 if (unlikely(!(ops->unmap || ops->unmap_pages) || domain->pgsize_bitmap == 0UL)) {
2579 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) {
2583 min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2601 unmapped_page = iommu_unmap_pages_ext(domain, iova, size - unmapped, iotlb_gather);
2616 size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
2622 ret = iommu_unmap_ext(domain, iova, size, &iotlb_gather);
2623 iommu_iotlb_sync(domain, &iotlb_gather);
2629 size_t iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova, size_t size,
2632 return iommu_unmap_ext(domain, iova, size, iotlb_gather);
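iommu_unmap() (line 2616) performs the IOTLB sync itself, while iommu_unmap_fast() leaves the flush to the caller through the gather cookie. A hedged sketch of the deferred-flush pattern, batching two unmaps behind one iommu_iotlb_sync(); the IOVAs and helper name are assumptions:

#include <linux/iommu.h>

/* Illustrative: unmap two equally sized ranges, flush the IOTLB once. */
static void demo_unmap_batched(struct iommu_domain *domain,
			       unsigned long iova1, unsigned long iova2,
			       size_t size)
{
	struct iommu_iotlb_gather gather;

	iommu_iotlb_gather_init(&gather);
	iommu_unmap_fast(domain, iova1, size, &gather);
	iommu_unmap_fast(domain, iova2, size, &gather);
	iommu_iotlb_sync(domain, &gather);
}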
2636 static size_t iommu_map_sg_ext(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg,
2639 const struct iommu_ops *ops = domain->ops;
2646 ret = ops->map_sg(domain, iova, sg, nents, prot, gfp, &mapped);
2649 ops->iotlb_sync_map(domain, iova, mapped);
2662 ret = iommu_map_ext(domain, iova + mapped, start, len, prot, gfp);
2684 ops->iotlb_sync_map(domain, iova, mapped);
2688 if (domain->ops->flush_iotlb_all && (prot & IOMMU_TLB_SHOT_ENTIRE)) {
2689 domain->ops->flush_iotlb_all(domain);
2697 iommu_unmap(domain, iova, mapped);
2702 size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents,
2706 return iommu_map_sg_ext(domain, iova, sg, nents, prot, GFP_KERNEL);
2710 size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents,
2713 return iommu_map_sg_ext(domain, iova, sg, nents, prot, GFP_ATOMIC);
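In this version iommu_map_sg() returns the number of bytes mapped and 0 on failure (the error path around line 2697 unwinds with iommu_unmap() first), rather than a negative errno. A hedged usage sketch; the scatterlist is assumed to be already populated and the helper name is illustrative:

#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/scatterlist.h>

/* Illustrative: map a prepared scatterlist at @iova, read/write. */
static int demo_map_sg(struct iommu_domain *domain, unsigned long iova,
		       struct scatterlist *sg, unsigned int nents)
{
	size_t mapped = iommu_map_sg(domain, iova, sg, nents,
				     IOMMU_READ | IOMMU_WRITE);

	return mapped ? 0 : -ENOMEM;
}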
2717 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, int prot)
2719 if (unlikely(domain->ops->domain_window_enable == NULL)) {
2723 return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size, prot);
2727 void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
2729 if (unlikely(domain->ops->domain_window_disable == NULL)) {
2733 return domain->ops->domain_window_disable(domain, wnd_nr);
2739 * @domain: the iommu domain where the fault has happened
2761 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags)
2769 if (domain->handler) {
2770 ret = domain->handler(domain, dev, iova, flags, domain->handler_token);
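report_iommu_fault() (line 2761) is the producer side: an IOMMU driver calls it on a translation fault and it dispatches to whatever handler was installed via iommu_set_fault_handler() (line 1923). A hedged, simplified sketch of that driver side; the IRQ framing and names are assumptions, not how any particular driver does it:

#include <linux/interrupt.h>
#include <linux/iommu.h>

/* Illustrative fault path: forward a decoded fault to the domain's handler. */
static irqreturn_t demo_fault_irq(struct iommu_domain *domain,
				  struct device *dev, unsigned long iova,
				  bool is_write)
{
	int flags = is_write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	if (!report_iommu_fault(domain, dev, iova, flags))
		return IRQ_HANDLED;	/* a registered handler dealt with it */

	return IRQ_NONE;
}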
2789 int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data)
2798 *geometry = domain->geometry;
2803 *paging = (domain->pgsize_bitmap != 0UL);
2806 if (!domain->ops->domain_get_attr) {
2810 ret = domain->ops->domain_get_attr(domain, attr, data);
2817 int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data)
2821 if (domain->ops->domain_set_attr == NULL) {
2825 ret = domain->ops->domain_set_attr(domain, attr, data);
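iommu_domain_get_attr() answers DOMAIN_ATTR_GEOMETRY and DOMAIN_ATTR_PAGING in the core (lines 2798-2803) and defers other attributes to the driver. A hedged sketch querying a domain's IOVA aperture; the helper name is illustrative:

#include <linux/iommu.h>
#include <linux/printk.h>

/* Illustrative: print the IOVA aperture of @domain. */
static int demo_print_geometry(struct iommu_domain *domain)
{
	struct iommu_domain_geometry geo;
	int ret;

	ret = iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo);
	if (ret)
		return ret;

	pr_info("aperture: %pad - %pad%s\n", &geo.aperture_start,
		&geo.aperture_end, geo.force_aperture ? " (enforced)" : "");
	return 0;
}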
3022 * For example, before disabling the aux-domain feature, the device driver
3054 * Aux-domain specific attach/detach.
3063 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
3067 if (domain->ops->aux_attach_dev) {
3068 ret = domain->ops->aux_attach_dev(domain, dev);
3079 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
3081 if (domain->ops->aux_detach_dev) {
3082 domain->ops->aux_detach_dev(domain, dev);
3088 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
3092 if (domain->ops->aux_get_pasid) {
3093 ret = domain->ops->aux_get_pasid(domain, dev);
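The aux-domain helpers (lines 3063-3093) attach a domain to a device through a PASID instead of replacing the device's primary domain. A hedged sketch of the usual sequence, assuming the caller has already enabled IOMMU_DEV_FEAT_AUX on the device; the helper name is illustrative:

#include <linux/iommu.h>

/* Illustrative: attach @domain as an aux domain and fetch its PASID. */
static int demo_aux_attach(struct iommu_domain *domain, struct device *dev)
{
	int pasid, ret;

	ret = iommu_aux_attach_device(domain, dev);
	if (ret)
		return ret;

	pasid = iommu_aux_get_pasid(domain, dev);
	if (pasid < 0) {
		iommu_aux_detach_device(domain, dev);
		return pasid;
	}

	/* program @pasid into the device's own PASID-tagged contexts */
	return 0;
}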
3130 /* Ensure device count and domain don't change while we're binding */