Lines matching refs: ops (from drivers/iommu/iommu.c)
191 const struct iommu_ops *ops = dev->bus->iommu_ops;
196 if (!ops) {
204 if (!try_module_get(ops->owner)) {
209 iommu_dev = ops->probe_device(dev);
233 ops->release_device(dev);
236 module_put(ops->owner);
246 const struct iommu_ops *ops = dev->bus->iommu_ops;
280 if (ops->probe_finalize) {
281 ops->probe_finalize(dev);
295 const struct iommu_ops *ops = dev->bus->iommu_ops;
303 ops->release_device(dev);
306 module_put(ops->owner);
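The references at 191–306 above are the core probe path: the core resolves dev->bus->iommu_ops, pins the driver module, and calls ->probe_device(), ->probe_finalize() and ->release_device(). Below is a minimal driver-side sketch of those three ops; struct my_iommu, my_iommu_lookup() and the other my_* names are hypothetical placeholders, while the op signatures match the iommu_ops used in this file.

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/module.h>

struct my_iommu {
	struct device *dev;
	struct iommu_device iommu;	/* registered with iommu_device_register() */
};

/* Hypothetical lookup: a real driver matches dev against its own topology
 * (fwspec IDs, PCI requester ID, ...). */
static struct my_iommu *my_iommu_lookup(struct device *dev);

static struct iommu_device *my_probe_device(struct device *dev)
{
	struct my_iommu *smmu = my_iommu_lookup(dev);

	if (!smmu)
		return ERR_PTR(-ENODEV);	/* device not translated by this IOMMU */

	return &smmu->iommu;
}

static void my_release_device(struct device *dev)
{
	/* undo whatever ->probe_device() set up for this device */
}

static void my_probe_finalize(struct device *dev)
{
	/* runs once the group and default domain exist, e.g. to install DMA ops */
}

static const struct iommu_ops my_iommu_ops = {
	.probe_device	= my_probe_device,
	.release_device	= my_release_device,
	.probe_finalize	= my_probe_finalize,
	.owner		= THIS_MODULE,	/* paired with try_module_get()/module_put() above */
	/* .device_group, .domain_alloc, .map, ... omitted in this sketch */
};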
747 if (domain->ops->apply_resv_region) {
748 domain->ops->apply_resv_region(dev, domain, entry);
783 if (domain->ops->is_attach_deferred) {
784 return domain->ops->is_attach_deferred(domain, dev);
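Lines 747–784 are optional driver hooks: ->apply_resv_region() lets a driver adjust a new domain for its reserved IOVA ranges, and ->is_attach_deferred() postpones the real attach (the kdump-style pattern). A hedged sketch of the latter, assuming a hypothetical per-device struct my_dev_data stored via dev_iommu_priv_set():

#include <linux/iommu.h>

struct my_dev_data {
	bool defer_attach;	/* set while inherited translations are still live */
};

static bool my_is_attach_deferred(struct iommu_domain *domain, struct device *dev)
{
	struct my_dev_data *data = dev_iommu_priv_get(dev);

	/* Returning true makes the core skip ->attach_dev() for now; the
	 * driver performs the real attach later (e.g. on first DMA map). */
	return data ? data->defer_attach : false;
}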
1209 if (!domain || !domain->ops->page_response) {
1255 ret = domain->ops->page_response(dev, evt, msg);
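Lines 1209 and 1255 complete an I/O page fault: iommu_page_response() matches the response against a pending event and forwards it to ->page_response(). A hedged consumer-side sketch follows, assuming the fault arrived through a handler registered with iommu_register_device_fault_handler() and using this era's uapi field names; my_iopf_handler() is hypothetical.

#include <linux/errno.h>
#include <linux/iommu.h>

static int my_iopf_handler(struct iommu_fault *fault, void *data)
{
	struct device *dev = data;
	struct iommu_page_response resp = {
		.argsz   = sizeof(resp),
		.version = IOMMU_PAGE_RESP_VERSION_1,
		.code    = IOMMU_PAGE_RESP_SUCCESS,
	};

	if (fault->type != IOMMU_FAULT_PAGE_REQ)
		return -EOPNOTSUPP;

	resp.grpid = fault->prm.grpid;
	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
		resp.flags |= IOMMU_PAGE_RESP_PASID_VALID;
		resp.pasid = fault->prm.pasid;
	}

	/* ... resolve the fault here (e.g. handle_mm_fault() on the bound mm) ... */

	return iommu_page_response(dev, &resp);
}

/* registered earlier with:
 * iommu_register_device_fault_handler(dev, my_iopf_handler, dev);
 */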
1491 const struct iommu_ops *ops = dev->bus->iommu_ops;
1494 if (ops->def_domain_type) {
1495 type = ops->def_domain_type(dev);
1557 const struct iommu_ops *ops = dev->bus->iommu_ops;
1566 if (!ops) {
1570 group = ops->device_group(dev);
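Lines 1491–1570 query the driver for a device's preferred default domain type and for its IOMMU group. A short sketch of both ops: generic_device_group(), pci_device_group() and IOMMU_DOMAIN_IDENTITY are the real core helpers, while my_device_needs_identity() is a hypothetical quirk check.

#include <linux/iommu.h>
#include <linux/pci.h>

static bool my_device_needs_identity(struct pci_dev *pdev);	/* hypothetical quirk table lookup */

/* Force a passthrough default domain for devices that cannot tolerate
 * translation; returning 0 means "no preference, use the global default". */
static int my_def_domain_type(struct device *dev)
{
	if (dev_is_pci(dev) && my_device_needs_identity(to_pci_dev(dev)))
		return IOMMU_DOMAIN_IDENTITY;

	return 0;
}

static struct iommu_group *my_device_group(struct device *dev)
{
	/* PCI devices share groups based on isolation (ACS and aliases);
	 * other devices get their own group. Both helpers come from the core. */
	if (dev_is_pci(dev))
		return pci_device_group(dev);

	return generic_device_group(dev);
}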
1632 * ADD/DEL call into iommu driver ops if provided, which may
1686 const struct iommu_ops *ops = dev->bus->iommu_ops;
1690 if (ops->def_domain_type) {
1691 type = ops->def_domain_type(dev);
1749 if (domain->ops->probe_finalize) {
1750 domain->ops->probe_finalize(dev);
1822 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
1860 * @ops: the callbacks provided by the iommu-driver
1864 * the iommu-api after these ops are registered.
1867 * is set up. With this function the iommu-driver can set the iommu-ops
1870 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
1874 if (ops == NULL) {
1883 bus->iommu_ops = ops;
1886 err = iommu_bus_init(bus, ops);
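bus_set_iommu() at 1870–1886 publishes a driver's ops for an entire bus, and iommu_bus_init() then walks the devices already registered on it. Below is a sketch of the usual registration sequence from a driver probe routine, assuming the ~5.10-era helpers (newer trees fold iommu_device_set_ops() into iommu_device_register()); struct my_iommu and my_iommu_register() are hypothetical.

#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

struct my_iommu {
	struct device *dev;
	struct iommu_device iommu;
};

static int my_iommu_register(struct my_iommu *smmu, const struct iommu_ops *ops)
{
	int ret;

	/* Make the hardware instance known to the core first. */
	iommu_device_set_ops(&smmu->iommu, ops);
	ret = iommu_device_register(&smmu->iommu);
	if (ret)
		return ret;

	/* Publish the ops bus-wide; already-present devices get probed now. */
	if (!iommu_present(&platform_bus_type)) {
		ret = bus_set_iommu(&platform_bus_type, ops);
		if (ret)
			return ret;
	}

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type))
		ret = bus_set_iommu(&pci_bus_type, ops);
#endif
	return ret;
}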
1945 domain->ops = bus->iommu_ops;
1961 domain->ops->domain_free(domain);
1969 if (unlikely(domain->ops->attach_dev == NULL)) {
1973 ret = domain->ops->attach_dev(domain, dev);
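Lines 1945–1973 show iommu_domain_alloc() stashing bus->iommu_ops in the new domain and __iommu_attach_device() dispatching to ->attach_dev(). From a caller that wants its own unmanaged domain, the round trip looks like this (the my_* wrappers are placeholders; the detach side lands in ->detach_dev() at 2244–2248 below):

#include <linux/iommu.h>

static int my_setup_private_domain(struct device *dev, struct iommu_domain **out)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);	/* domain->ops = bus->iommu_ops */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);	/* ends up in ops->attach_dev() */
	if (ret) {
		iommu_domain_free(domain);	/* ops->domain_free() */
		return ret;
	}

	*out = domain;
	return 0;
}

static void my_teardown_private_domain(struct device *dev, struct iommu_domain *domain)
{
	iommu_detach_device(domain, dev);	/* ops->detach_dev() */
	iommu_domain_free(domain);
}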
2077 if (unlikely(!domain->ops->cache_invalidate)) {
2125 return domain->ops->cache_invalidate(domain, dev, &inv_info);
2197 if (unlikely(!domain->ops->sva_bind_gpasid)) {
2206 return domain->ops->sva_bind_gpasid(domain, dev, &data);
2212 if (unlikely(!domain->ops->sva_unbind_gpasid)) {
2216 return domain->ops->sva_unbind_gpasid(dev, pasid);
2225 if (unlikely(!domain->ops->sva_bind_gpasid)) {
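Lines 2077–2225 are the nesting/vSVA uapi paths: the core copies and sanitizes the user-provided structures before calling ->cache_invalidate() and ->sva_bind_gpasid()/->sva_unbind_gpasid(). A thin, hedged sketch of a VFIO-style caller that simply forwards the user pointer from its ioctl; my_handle_cache_inval() is hypothetical.

#include <linux/iommu.h>

static int my_handle_cache_inval(struct iommu_domain *domain,
				 struct device *dev, void __user *arg)
{
	/* The uapi wrapper copies struct iommu_cache_invalidate_info from
	 * userspace, validates argsz/version, then calls ops->cache_invalidate(). */
	return iommu_uapi_cache_invalidate(domain, dev, arg);
}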
2244 if (unlikely(domain->ops->detach_dev == NULL)) {
2248 domain->ops->detach_dev(domain, dev);
2390 if (unlikely(domain->ops->iova_to_phys == NULL)) {
2394 return domain->ops->iova_to_phys(domain, iova);
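iommu_iova_to_phys() at 2390–2394 is a direct wrapper over ->iova_to_phys(). A small sketch of a debug-style consistency check; my_check_mapping() is hypothetical.

#include <linux/iommu.h>
#include <linux/printk.h>

static void my_check_mapping(struct iommu_domain *domain, dma_addr_t iova,
			     phys_addr_t expected)
{
	phys_addr_t phys = iommu_iova_to_phys(domain, iova);

	if (phys != expected)
		pr_warn("iova %pad resolves to %pa, expected %pa\n",
			&iova, &phys, &expected);
}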
2451 const struct iommu_ops *ops = domain->ops;
2459 if (ops->map_pages) {
2460 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, gfp, mapped);
2462 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
2472 const struct iommu_ops *ops = domain->ops;
2479 if (unlikely(!(ops->map || ops->map_pages) || domain->pgsize_bitmap == 0UL)) {
2532 const struct iommu_ops *ops = domain->ops;
2536 if (ret == 0 && ops->iotlb_sync_map) {
2537 ops->iotlb_sync_map(domain, iova, size);
2559 const struct iommu_ops *ops = domain->ops;
2563 return ops->unmap_pages ? ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather)
2564 : ops->unmap(domain, iova, pgsize, iotlb_gather);
2570 const struct iommu_ops *ops = domain->ops;
2575 if (unlikely(!(ops->unmap || ops->unmap_pages) || domain->pgsize_bitmap == 0UL)) {
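Lines 2451–2575 implement the map and unmap paths, preferring the batched ->map_pages()/->unmap_pages() ops when the driver supplies them and falling back to ->map()/->unmap(). That choice is invisible to API users; a typical caller pairs iommu_map() and iommu_unmap() as below (the my_* wrappers are placeholders).

#include <linux/iommu.h>

static int my_map_buffer(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size)
{
	/* iova, paddr and size must all be aligned to a page size present in
	 * domain->pgsize_bitmap, or iommu_map() rejects the request. */
	return iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
}

static void my_unmap_buffer(struct iommu_domain *domain, unsigned long iova,
			    size_t size)
{
	size_t unmapped = iommu_unmap(domain, iova, size);

	WARN_ON(unmapped != size);
}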
2639 const struct iommu_ops *ops = domain->ops;
2645 if (ops->map_sg) {
2646 ret = ops->map_sg(domain, iova, sg, nents, prot, gfp, &mapped);
2648 if (ops->iotlb_sync_map) {
2649 ops->iotlb_sync_map(domain, iova, mapped);
2683 if (ops->iotlb_sync_map) {
2684 ops->iotlb_sync_map(domain, iova, mapped);
2688 if (domain->ops->flush_iotlb_all && (prot & IOMMU_TLB_SHOT_ENTIRE)) {
2689 domain->ops->flush_iotlb_all(domain);
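Lines 2639–2689 are __iommu_map_sg(), which maps a scatterlist contiguously at the given IOVA, syncs the IOTLB via ->iotlb_sync_map(), and in this tree may also flush everything when IOMMU_TLB_SHOT_ENTIRE is requested. A hedged caller sketch, assuming the era's iommu_map_sg() that returns the number of bytes mapped (0 on failure, with partial mappings torn down internally); my_map_sgtable() is hypothetical.

#include <linux/iommu.h>
#include <linux/scatterlist.h>

static int my_map_sgtable(struct iommu_domain *domain, unsigned long iova,
			  struct sg_table *sgt)
{
	size_t mapped;

	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
			      IOMMU_READ | IOMMU_WRITE);
	if (!mapped)
		return -ENOMEM;

	return 0;
}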
2719 if (unlikely(domain->ops->domain_window_enable == NULL)) {
2723 return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size, prot);
2729 if (unlikely(domain->ops->domain_window_disable == NULL)) {
2733 return domain->ops->domain_window_disable(domain, wnd_nr);
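The window ops at 2719–2733 serve the few drivers (fsl_pamu-style) that expose fixed DMA windows instead of page tables. A minimal, hedged usage sketch; window number 0 and the 1 GiB size are arbitrary examples, and the my_* wrappers are hypothetical.

#include <linux/iommu.h>
#include <linux/sizes.h>

static int my_setup_window(struct iommu_domain *domain, phys_addr_t base)
{
	/* Carve a single window covering [base, base + 1 GiB). */
	return iommu_domain_window_enable(domain, 0, base, SZ_1G,
					  IOMMU_READ | IOMMU_WRITE);
}

static void my_teardown_window(struct iommu_domain *domain)
{
	iommu_domain_window_disable(domain, 0);
}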
2806 if (!domain->ops->domain_get_attr) {
2810 ret = domain->ops->domain_get_attr(domain, attr, data);
2821 if (domain->ops->domain_set_attr == NULL) {
2825 ret = domain->ops->domain_set_attr(domain, attr, data);
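Lines 2806–2825 dispatch iommu_domain_get_attr()/iommu_domain_set_attr(). A common use in this era is querying the domain aperture with DOMAIN_ATTR_GEOMETRY, as sketched below; my_query_aperture() is hypothetical, while the attribute and the struct iommu_domain_geometry fields are the real ones.

#include <linux/iommu.h>

static int my_query_aperture(struct iommu_domain *domain,
			     dma_addr_t *start, dma_addr_t *end)
{
	struct iommu_domain_geometry geo;
	int ret;

	ret = iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo);
	if (ret)
		return ret;

	if (!geo.force_aperture)
		return -EINVAL;	/* driver does not enforce the aperture */

	*start = geo.aperture_start;
	*end = geo.aperture_end;
	return 0;
}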
2832 const struct iommu_ops *ops = dev->bus->iommu_ops;
2834 if (ops && ops->get_resv_regions) {
2835 ops->get_resv_regions(dev, list);
2841 const struct iommu_ops *ops = dev->bus->iommu_ops;
2843 if (ops && ops->put_resv_regions) {
2844 ops->put_resv_regions(dev, list);
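Lines 2832–2844 hand a device's reserved regions to the caller and release them again. The usual pattern builds a local list head, walks it, and returns it with iommu_put_resv_regions(); my_dump_resv_regions() is a hypothetical example.

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/list.h>

static void my_dump_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);

	iommu_get_resv_regions(dev, &resv_regions);

	list_for_each_entry(region, &resv_regions, list)
		dev_info(dev, "reserved: %pa + %zx (type %d)\n",
			 &region->start, region->length, region->type);

	iommu_put_resv_regions(dev, &resv_regions);
}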
2910 const struct iommu_ops *ops = NULL;
2917 ops = iommu->ops;
2922 return ops;
2925 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, const struct iommu_ops *ops)
2930 return ops == fwspec->ops ? 0 : -EINVAL;
2945 fwspec->ops = ops;
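Lines 2910–2945 cover the firmware description glue: iommu_ops_from_fwnode() finds the ops registered for a firmware node, and iommu_fwspec_init() attaches an iommu_fwspec (including fwspec->ops) to the device. The OF/ACPI core normally calls iommu_fwspec_init() itself; the driver-visible part is its ->of_xlate() hook adding per-device IDs, sketched here with a hypothetical my_of_xlate().

#include <linux/iommu.h>
#include <linux/of.h>

/* Called by the core for each "iommus = <&my_iommu ID>" phandle argument. */
static int my_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	u32 id = args->args[0];

	/* dev_iommu_fwspec_get(dev)->ops was already set by iommu_fwspec_init(). */
	return iommu_fwspec_add_ids(dev, &id, 1);
}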
2996 const struct iommu_ops *ops = dev->bus->iommu_ops;
2998 if (ops && ops->dev_has_feat) {
2999 return ops->dev_has_feat(dev, feat);
3009 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
3011 if (ops->dev_enable_feat) {
3012 return ops->dev_enable_feat(dev, feat);
3028 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
3030 if (ops->dev_disable_feat) {
3031 return ops->dev_disable_feat(dev, feat);
3042 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
3044 if (ops->dev_feat_enabled) {
3045 return ops->dev_feat_enabled(dev, feat);
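Lines 2996–3045 are the per-device feature interface (has/enable/disable/enabled). A caller that needs SVA typically checks and enables IOMMU_DEV_FEAT_SVA before binding, roughly as below; my_enable_sva() is hypothetical.

#include <linux/device.h>
#include <linux/iommu.h>

static int my_enable_sva(struct device *dev)
{
	int ret;

	if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_SVA))
		return 0;	/* already enabled */

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		dev_err(dev, "cannot enable SVA: %d\n", ret);

	return ret;
}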
3067 if (domain->ops->aux_attach_dev) {
3068 ret = domain->ops->aux_attach_dev(domain, dev);
3081 if (domain->ops->aux_detach_dev) {
3082 domain->ops->aux_detach_dev(domain, dev);
3092 if (domain->ops->aux_get_pasid) {
3093 ret = domain->ops->aux_get_pasid(domain, dev);
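Lines 3067–3093 are the auxiliary-domain path used for PASID-tagged (mdev-style) attachment. A hedged caller sketch that attaches an aux domain and fetches the PASID the driver assigned; it assumes IOMMU_DEV_FEAT_AUX was enabled beforehand, and my_attach_aux_domain() is hypothetical.

#include <linux/iommu.h>

static int my_attach_aux_domain(struct iommu_domain *domain, struct device *dev,
				int *pasid)
{
	int ret;

	ret = iommu_aux_attach_device(domain, dev);	/* ops->aux_attach_dev() */
	if (ret)
		return ret;

	ret = iommu_aux_get_pasid(domain, dev);		/* ops->aux_get_pasid() */
	if (ret < 0) {
		iommu_aux_detach_device(domain, dev);	/* ops->aux_detach_dev() */
		return ret;
	}

	*pasid = ret;
	return 0;
}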
3119 const struct iommu_ops *ops = dev->bus->iommu_ops;
3121 if (!ops || !ops->sva_bind) {
3143 handle = ops->sva_bind(dev, mm, drvdata);
3167 const struct iommu_ops *ops = dev->bus->iommu_ops;
3169 if (!ops || !ops->sva_unbind) {
3179 ops->sva_unbind(handle);
3188 const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
3190 if (!ops || !ops->sva_get_pasid) {
3194 return ops->sva_get_pasid(handle);
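Lines 3119–3194 end in ->sva_bind(), ->sva_unbind() and ->sva_get_pasid(). A device driver that shares the current process's address space uses the wrappers as below (the same pattern the in-tree uacce users follow); my_bind_process() is hypothetical, and IOMMU_PASID_INVALID is the sentinel this era's header defines for a failed PASID allocation.

#include <linux/iommu.h>
#include <linux/sched.h>

static int my_bind_process(struct device *dev, struct iommu_sva **out_handle)
{
	struct iommu_sva *handle;
	u32 pasid;

	handle = iommu_sva_bind_device(dev, current->mm, NULL);	/* ops->sva_bind() */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);			/* ops->sva_get_pasid() */
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);		/* ops->sva_unbind() */
		return -ENODEV;
	}

	dev_dbg(dev, "bound mm, pasid %u\n", pasid);
	*out_handle = handle;
	return 0;
}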