/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>

/* Page-protection flags passed to iommu_map() and related mapping calls */
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC (1 << 3)
#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV (1 << 5)
/*
 * Non-coherent masters can use this page protection flag to set cacheable
 * memory attributes for only a transparent outer level of cache, also known as
 * the last-level or system cache.
 */
#define IOMMU_SYS_CACHE_ONLY (1 << 6)
/*
 * Non-coherent masters can use this page protection flag to set cacheable
 * memory attributes with a no write allocation cache policy for only a
 * transparent outer level of cache, also known as the last-level or system
 * cache.
 */
#define IOMMU_SYS_CACHE_ONLY_NWA (1 << 7)

#ifdef CONFIG_NO_GKI

/* For shooting down the entire IOMMU TLB at once */
#define IOMMU_TLB_SHOT_ENTIRE (1 << 8)

#endif

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;

/* iommu fault flags */
#define IOMMU_FAULT_READ 0x0
#define IOMMU_FAULT_WRITE 0x1

/* Domain-level fault handler: (domain, dev, faulting iova, fault flags, token) */
typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, unsigned long, int, void *);
/* Device-level fault handler: (fault descriptor, handler private data) */
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
    dma_addr_t aperture_start; /* First address that can be mapped */
    dma_addr_t aperture_end;   /* Last address that can be mapped */
    bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
#define IOMMU_DOMAIN_DMA_API \
    (1U << 1) /* Domain for use in DMA-API \
                 implementation */
#define IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */

/*
 * These are the possible domain-types
 *
 * IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate
 *                        devices
 * IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses
 * IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used
 *                          for VMs
 * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
 *                    This flag allows IOMMU drivers to implement
 *                    certain optimizations for these domains
 */
#define IOMMU_DOMAIN_BLOCKED (0U)
#define IOMMU_DOMAIN_IDENTITY (IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | IOMMU_DOMAIN_DMA_API)

struct iommu_domain {
    unsigned type;                 /* IOMMU_DOMAIN_* value */
    const struct iommu_ops *ops;
    unsigned long pgsize_bitmap;   /* Bitmap of page sizes in use */
    iommu_fault_handler_t handler; /* installed via iommu_set_fault_handler() */
    void *handler_token;           /* opaque token passed back to @handler */
    struct iommu_domain_geometry geometry;
    void *iova_cookie;
};

enum iommu_cap {
    IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
                                  transactions */
    IOMMU_CAP_INTR_REMAP,      /* IOMMU supports interrupt isolation */
    IOMMU_CAP_NOEXEC,          /* IOMMU_NOEXEC flag */
};

/*
 * Following constraints are specific to FSL_PAMUV1:
 * -aperture must be power of 2, and naturally aligned
 * -number of windows must be power of 2, and address space size
 *  of each window is determined by aperture size / # of windows
 * -the actual size of the mapped region of a window must be power
 *  of 2 starting with 4KB and physical address must be naturally
 *  aligned.
 * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned constraints.
 * The caller can invoke iommu_domain_get_attr to check if the underlying
 * iommu implementation supports these constraints.
 */

enum iommu_attr {
    DOMAIN_ATTR_GEOMETRY,
    DOMAIN_ATTR_PAGING,
    DOMAIN_ATTR_WINDOWS,
    DOMAIN_ATTR_FSL_PAMU_STASH,
    DOMAIN_ATTR_FSL_PAMU_ENABLE,
    DOMAIN_ATTR_FSL_PAMUV1,
    DOMAIN_ATTR_NESTING, /* two stages of translation */
    DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
    DOMAIN_ATTR_MAX,
};

/* These are the possible reserved region types */
enum iommu_resv_type {
    /* Memory regions which must be mapped 1:1 at all times */
    IOMMU_RESV_DIRECT,
    /*
     * Memory regions which are advertised to be 1:1 but are
     * commonly considered relaxable in some conditions,
     * for instance in device assignment use case (USB, Graphics)
     */
    IOMMU_RESV_DIRECT_RELAXABLE,
    /* Arbitrary "never map this or give it to a device" address ranges */
    IOMMU_RESV_RESERVED,
    /* Hardware MSI region (untranslated) */
    IOMMU_RESV_MSI,
    /* Software-managed MSI translation window */
    IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 */
struct iommu_resv_region {
    struct list_head list;
    phys_addr_t start;
    size_t length;
    int prot;
    enum iommu_resv_type type;
};

/* Per device IOMMU features */
enum iommu_dev_features {
    IOMMU_DEV_FEAT_AUX, /* Aux-domain feature */
    IOMMU_DEV_FEAT_SVA, /* Shared Virtual Addresses */
};

#define IOMMU_PASID_INVALID (-1U)

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync().
 */
struct iommu_iotlb_gather {
    unsigned long start;
    unsigned long end;
    size_t pgsize;
};

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @domain_free: free iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @map_sg: map a scatter-gather list of physically contiguous chunks to
 *          an iommu domain.
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @iova_to_phys: translate iova to physical address
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the groups domain
 * @device_group: find iommu group for a particular device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
 * @get_resv_regions: Request list of reserved regions for a device
 * @put_resv_regions: Free list of reserved regions for a device
 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
 * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_has/enable/disable_feat: per device entries to check/enable/disable
 *                               iommu specific features.
 * @dev_feat_enabled: check enabled feature
 * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
 * @aux_get_pasid: get the pasid given an aux-domain
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated to a SVA handle
 * @page_response: handle page request response
 * @cache_invalidate: invalidate translation caches
 * @sva_bind_gpasid: bind guest pasid and mm
 * @sva_unbind_gpasid: unbind guest pasid and mm
 * @def_domain_type: device default domain type, return value:
 *                   - IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *                   - IOMMU_DOMAIN_DMA: must use a dma domain
 *                   - 0: use the default setting
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
    bool (*capable)(enum iommu_cap);

    /* Domain allocation and freeing by the iommu driver */
    struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
    void (*domain_free)(struct iommu_domain *);

    int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
    void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
    int (*map)(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
    int (*map_pages)(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount,
                     int prot, gfp_t gfp, size_t *mapped);
    int (*map_sg)(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot,
                  gfp_t gfp, size_t *mapped);
    size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, size_t size,
                    struct iommu_iotlb_gather *iotlb_gather);
    size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova, size_t pgsize, size_t pgcount,
                          struct iommu_iotlb_gather *iotlb_gather);
    void (*flush_iotlb_all)(struct iommu_domain *domain);
    void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, size_t size);
    void (*iotlb_sync)(struct iommu_domain *domain, struct iommu_iotlb_gather *iotlb_gather);
    phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
    struct iommu_device *(*probe_device)(struct device *dev);
    void (*release_device)(struct device *dev);
    void (*probe_finalize)(struct device *dev);
    struct iommu_group *(*device_group)(struct device *dev);
    int (*domain_get_attr)(struct iommu_domain *domain, enum iommu_attr attr, void *data);
    int (*domain_set_attr)(struct iommu_domain *domain, enum iommu_attr attr, void *data);

    /* Request/Free a list of reserved regions for a device */
    void (*get_resv_regions)(struct device *dev, struct list_head *list);
    void (*put_resv_regions)(struct device *dev, struct list_head *list);
    void (*apply_resv_region)(struct device *dev, struct iommu_domain *domain, struct iommu_resv_region *region);

    /* Window handling functions */
    int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, int prot);
    void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);

    int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
    bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);

    /* Per device IOMMU features */
    bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
    bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
    int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
    int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

    /* Aux-domain specific attach/detach entries */
    int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
    void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
    int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);

    struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm, void *drvdata);
    void (*sva_unbind)(struct iommu_sva *handle);
    u32 (*sva_get_pasid)(struct iommu_sva *handle);

    int (*page_response)(struct device *dev, struct iommu_fault_event *evt, struct iommu_page_response *msg);
    int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
                            struct iommu_cache_invalidate_info *inv_info);
    int (*sva_bind_gpasid)(struct iommu_domain *domain, struct device *dev, struct iommu_gpasid_bind_data *data);

    int (*sva_unbind_gpasid)(struct device *dev, u32 pasid);

    int (*def_domain_type)(struct device *dev);

    unsigned long pgsize_bitmap;
    struct module *owner;
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *                       instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: associated firmware node handle (see iommu_device_set_fwnode())
 * @dev: struct device for sysfs handling
 */
struct iommu_device {
    struct list_head list;
    const struct iommu_ops *ops;
    struct fwnode_handle *fwnode;
    struct device *dev;
};

/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as a page requests or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
    struct iommu_fault fault;
    struct list_head list;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which needs response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
    iommu_dev_fault_handler_t handler;
    void *data;
    struct list_head faults;
    struct mutex lock;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @lock: mutex serialising updates to this struct (scope defined by iommu core)
 * @fault_param: IOMMU detected device fault reporting data
 * @fwspec: IOMMU fwspec data
 * @iommu_dev: IOMMU device this device is linked to
 * @priv: IOMMU Driver private data
 *
 * migrate other per device data pointers under iommu_dev_data, e.g.
 * struct iommu_group *iommu_group;
 */
struct dev_iommu {
    struct mutex lock;
    struct iommu_fault_param *fault_param;
    struct iommu_fwspec *fwspec;
    struct iommu_device *iommu_dev;
    void *priv;
};

int iommu_device_register(struct iommu_device *iommu);
void iommu_device_unregister(struct iommu_device *iommu);
int iommu_device_sysfs_add(struct iommu_device *iommu, struct device *parent, const struct attribute_group **groups,
                           const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);

/* Helper used by the iommu_device_set_ops() macro below; do not call directly. */
static inline void _iommu_device_set_ops(struct iommu_device *iommu, const struct iommu_ops *ops)
{
    iommu->ops = ops;
}

/* Install @ops on @iommu, stamping the ops with the owning module first. */
#define iommu_device_set_ops(iommu, ops) \
    do { \
        struct iommu_ops *__ops = (struct iommu_ops *)(ops); \
        __ops->owner = THIS_MODULE; \
        _iommu_device_set_ops(iommu, __ops); \
    } while (0)

static inline void iommu_device_set_fwnode(struct iommu_device *iommu, struct fwnode_handle *fwnode)
{
    iommu->fwnode = fwnode;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
    return (struct iommu_device *)dev_get_drvdata(dev);
}

/*
 * Reset @gather so a new range can be accumulated; @start is primed to
 * ULONG_MAX so the first iommu_iotlb_gather_add_page() always shrinks it.
 */
static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
    *gather = (struct iommu_iotlb_gather) {
        .start = ULONG_MAX,
    };
}

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1     /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2     /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3    /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4   /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5  /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */

extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern int bus_iommu_probe(struct bus_type *bus);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain, struct device *dev);
439extern void iommu_detach_device(struct iommu_domain *domain, struct device *dev); 440extern int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev, void __user *uinfo); 441 442extern int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev, void __user *udata); 443extern int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, void __user *udata); 444extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, ioasid_t pasid); 445extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); 446extern struct iommu_domain *iommu_get_dma_domain(struct device *dev); 447extern int iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot); 448extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot); 449extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size); 450extern size_t iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova, size_t size, 451 struct iommu_iotlb_gather *iotlb_gather); 452extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, 453 int prot); 454extern size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, 455 unsigned int nents, int prot); 456extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); 457extern void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token); 458 459extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); 460extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); 461extern void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list); 462extern void iommu_set_default_passthrough(bool cmd_line); 463extern void 
iommu_set_default_translated(bool cmd_line); 464extern bool iommu_default_passthrough(void); 465extern struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, 466 enum iommu_resv_type type); 467extern int iommu_get_group_resv_regions(struct iommu_group *group, struct list_head *head); 468 469extern int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group); 470extern void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group); 471extern struct iommu_group *iommu_group_alloc(void); 472extern void *iommu_group_get_iommudata(struct iommu_group *group); 473extern void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, void (*release)(void *iommu_data)); 474extern int iommu_group_set_name(struct iommu_group *group, const char *name); 475extern int iommu_group_add_device(struct iommu_group *group, struct device *dev); 476extern void iommu_group_remove_device(struct device *dev); 477extern int iommu_group_for_each_dev(struct iommu_group *group, void *data, int (*fn)(struct device *, void *)); 478extern struct iommu_group *iommu_group_get(struct device *dev); 479extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group); 480extern void iommu_group_put(struct iommu_group *group); 481extern int iommu_group_register_notifier(struct iommu_group *group, struct notifier_block *nb); 482extern int iommu_group_unregister_notifier(struct iommu_group *group, struct notifier_block *nb); 483extern int iommu_register_device_fault_handler(struct device *dev, iommu_dev_fault_handler_t handler, void *data); 484 485extern int iommu_unregister_device_fault_handler(struct device *dev); 486 487extern int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt); 488extern int iommu_page_response(struct device *dev, struct iommu_page_response *msg); 489 490extern int iommu_group_id(struct iommu_group *group); 491extern struct iommu_domain 
*iommu_group_default_domain(struct iommu_group *); 492 493extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, void *data); 494extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, void *data); 495 496/* Window handling function prototypes */ 497extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t offset, u64 size, int prot); 498extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr); 499 500extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); 501 502static inline void iommu_flush_iotlb_all(struct iommu_domain *domain) 503{ 504 if (domain->ops->flush_iotlb_all) { 505 domain->ops->flush_iotlb_all(domain); 506 } 507} 508 509static inline void iommu_iotlb_sync(struct iommu_domain *domain, struct iommu_iotlb_gather *iotlb_gather) 510{ 511 if (domain->ops->iotlb_sync) { 512 domain->ops->iotlb_sync(domain, iotlb_gather); 513 } 514 515 iommu_iotlb_gather_init(iotlb_gather); 516} 517 518static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, struct iommu_iotlb_gather *gather, 519 unsigned long iova, size_t size) 520{ 521 unsigned long start = iova, end = start + size - 1; 522 523 /* 524 * If the new page is disjoint from the current range or is mapped at 525 * a different granularity, then sync the TLB so that the gather 526 * structure can be rewritten. 
527 */ 528 if (gather->pgsize != size || end + 1 < gather->start || start > gather->end + 1) { 529 if (gather->pgsize) { 530 iommu_iotlb_sync(domain, gather); 531 } 532 gather->pgsize = size; 533 } 534 535 if (gather->end < end) { 536 gather->end = end; 537 } 538 539 if (gather->start > start) { 540 gather->start = start; 541 } 542} 543 544/* PCI device grouping function */ 545extern struct iommu_group *pci_device_group(struct device *dev); 546/* Generic device grouping function */ 547extern struct iommu_group *generic_device_group(struct device *dev); 548extern void rk_iommu_mask_irq(struct device *dev); 549extern void rk_iommu_unmask_irq(struct device *dev); 550/* FSL-MC device grouping function */ 551struct iommu_group *fsl_mc_device_group(struct device *dev); 552 553/** 554 * struct iommu_fwspec - per-device IOMMU instance data 555 * @ops: ops for this device's IOMMU 556 * @iommu_fwnode: firmware handle for this device's IOMMU 557 * @iommu_priv: IOMMU driver private data for this device 558 * @num_pasid_bits: number of PASID bits supported by this device 559 * @num_ids: number of associated device IDs 560 * @ids: IDs which this device may present to the IOMMU 561 */ 562struct iommu_fwspec { 563 const struct iommu_ops *ops; 564 struct fwnode_handle *iommu_fwnode; 565 u32 flags; 566 u32 num_pasid_bits; 567 unsigned int num_ids; 568 u32 ids[]; 569}; 570 571/* ATS is supported */ 572#define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0) 573 574/** 575 * struct iommu_sva - handle to a device-mm bond 576 */ 577struct iommu_sva { 578 struct device *dev; 579}; 580 581int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, const struct iommu_ops *ops); 582void iommu_fwspec_free(struct device *dev); 583int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); 584const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode); 585 586static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) 587{ 588 if (dev->iommu) { 589 
return dev->iommu->fwspec; 590 } else { 591 return NULL; 592 } 593} 594 595static inline void dev_iommu_fwspec_set(struct device *dev, struct iommu_fwspec *fwspec) 596{ 597 dev->iommu->fwspec = fwspec; 598} 599 600static inline void *dev_iommu_priv_get(struct device *dev) 601{ 602 if (dev->iommu) { 603 return dev->iommu->priv; 604 } else { 605 return NULL; 606 } 607} 608 609static inline void dev_iommu_priv_set(struct device *dev, void *priv) 610{ 611 dev->iommu->priv = priv; 612} 613 614int iommu_probe_device(struct device *dev); 615void iommu_release_device(struct device *dev); 616 617bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f); 618int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f); 619int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f); 620bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f); 621int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev); 622void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev); 623int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev); 624 625struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata); 626void iommu_sva_unbind_device(struct iommu_sva *handle); 627u32 iommu_sva_get_pasid(struct iommu_sva *handle); 628 629#else /* CONFIG_IOMMU_API */ 630 631struct iommu_ops { 632}; 633struct iommu_group { 634}; 635struct iommu_fwspec { 636}; 637struct iommu_device { 638}; 639struct iommu_fault_param { 640}; 641struct iommu_iotlb_gather { 642}; 643 644static inline bool iommu_present(struct bus_type *bus) 645{ 646 return false; 647} 648 649static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap) 650{ 651 return false; 652} 653 654static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) 655{ 656 return NULL; 657} 658 659static inline struct iommu_group *iommu_group_get_by_id(int id) 660{ 661 return 
NULL; 662} 663 664static inline void iommu_domain_free(struct iommu_domain *domain) 665{ 666} 667 668static inline int iommu_attach_device(struct iommu_domain *domain, struct device *dev) 669{ 670 return -ENODEV; 671} 672 673static inline void iommu_detach_device(struct iommu_domain *domain, struct device *dev) 674{ 675} 676 677static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 678{ 679 return NULL; 680} 681 682static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) 683{ 684 return -ENODEV; 685} 686 687static inline int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, 688 int prot) 689{ 690 return -ENODEV; 691} 692 693static inline size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) 694{ 695 return 0; 696} 697 698static inline size_t iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova, int gfp_order, 699 struct iommu_iotlb_gather *iotlb_gather) 700{ 701 return 0; 702} 703 704static inline size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, 705 unsigned int nents, int prot) 706{ 707 return 0; 708} 709 710static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, 711 unsigned int nents, int prot) 712{ 713 return 0; 714} 715 716static inline void iommu_flush_iotlb_all(struct iommu_domain *domain) 717{ 718} 719 720static inline void iommu_iotlb_sync(struct iommu_domain *domain, struct iommu_iotlb_gather *iotlb_gather) 721{ 722} 723 724static inline int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, 725 int prot) 726{ 727 return -ENODEV; 728} 729 730static inline void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr) 731{ 732} 733 734static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 735{ 
736 return 0; 737} 738 739static inline void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token) 740{ 741} 742 743static inline void iommu_get_resv_regions(struct device *dev, struct list_head *list) 744{ 745} 746 747static inline void iommu_put_resv_regions(struct device *dev, struct list_head *list) 748{ 749} 750 751static inline int iommu_get_group_resv_regions(struct iommu_group *group, struct list_head *head) 752{ 753 return -ENODEV; 754} 755 756static inline void iommu_set_default_passthrough(bool cmd_line) 757{ 758} 759 760static inline void iommu_set_default_translated(bool cmd_line) 761{ 762} 763 764static inline bool iommu_default_passthrough(void) 765{ 766 return true; 767} 768 769static inline int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) 770{ 771 return -ENODEV; 772} 773 774static inline void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 775{ 776} 777 778static inline struct iommu_group *iommu_group_alloc(void) 779{ 780 return ERR_PTR(-ENODEV); 781} 782 783static inline void *iommu_group_get_iommudata(struct iommu_group *group) 784{ 785 return NULL; 786} 787 788static inline void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, 789 void (*release)(void *iommu_data)) 790{ 791} 792 793static inline int iommu_group_set_name(struct iommu_group *group, const char *name) 794{ 795 return -ENODEV; 796} 797 798static inline int iommu_group_add_device(struct iommu_group *group, struct device *dev) 799{ 800 return -ENODEV; 801} 802 803static inline void iommu_group_remove_device(struct device *dev) 804{ 805} 806 807static inline int iommu_group_for_each_dev(struct iommu_group *group, void *data, int (*fn)(struct device *, void *)) 808{ 809 return -ENODEV; 810} 811 812static inline struct iommu_group *iommu_group_get(struct device *dev) 813{ 814 return NULL; 815} 816 817static inline void iommu_group_put(struct iommu_group *group) 818{ 
}

/*
 * Stub section: everything from here down to the #endif below is compiled
 * when CONFIG_IOMMU_API is disabled (see the closing "#endif /&#42;
 * CONFIG_IOMMU_API &#42;/" marker).  Each stub lets callers build and link
 * without the IOMMU core.  The return values differ deliberately per
 * function (-ENODEV vs -EINVAL vs 0 vs NULL/false) — do not unify them.
 */

/* Group-notifier stubs: register fails, unregister is a harmless no-op. */
static inline int iommu_group_register_notifier(struct iommu_group *group, struct notifier_block *nb)
{
	return -ENODEV;
}

static inline int iommu_group_unregister_notifier(struct iommu_group *group, struct notifier_block *nb)
{
	return 0;
}

/* Device fault reporting stubs. */
static inline int iommu_register_device_fault_handler(struct device *dev, iommu_dev_fault_handler_t handler, void *data)
{
	return -ENODEV;
}

static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
	return 0;
}

static inline int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	return -ENODEV;
}

static inline int iommu_page_response(struct device *dev, struct iommu_page_response *msg)
{
	return -ENODEV;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

/* Domain attribute stubs: -EINVAL (bad request), not -ENODEV, here. */
static inline int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

/* struct iommu_device registration / sysfs stubs. */
static inline int iommu_device_register(struct iommu_device *iommu)
{
	return -ENODEV;
}

static inline void iommu_device_set_ops(struct iommu_device *iommu, const struct iommu_ops *ops)
{
}

static inline void iommu_device_set_fwnode(struct iommu_device *iommu, struct fwnode_handle *fwnode)
{
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

/* IOTLB gather stubs: nothing to batch when there is no IOMMU core. */
static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int
iommu_device_sysfs_add(struct iommu_device *iommu, struct device *parent,
		       const struct attribute_group **groups, const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct device *dev, struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct device *dev, struct device *link)
{
}

/* Firmware-spec (OF/ACPI description) stubs. */
static inline int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	return -ENODEV;
}

static inline const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	return NULL;
}

/* Per-device feature stubs: queries report "not supported", never error. */
static inline bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}

static inline bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}

static inline int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

/* Auxiliary-domain stubs. */
static inline int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
}

static inline int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

/* Shared Virtual Addressing (SVA) stubs. */
static inline struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	return NULL;
}

static inline void
iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}

/* UAPI passthrough stubs (guest cache invalidation / gpasid binding). */
static inline int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
					      struct iommu_cache_invalidate_info *inv_info)
{
	return -ENODEV;
}

static inline int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev, void __user *udata)
{
	return -ENODEV;
}

static inline int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, void __user *udata)
{
	return -ENODEV;
}

static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, ioasid_t pasid)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

/*
 * NOTE(review): rk_iommu_* look like Rockchip vendor hooks added to this
 * header (they are not in the upstream kernel API) — confirm against the
 * vendor tree before relying on them.
 */
static inline void rk_iommu_mask_irq(struct device *dev)
{
}

static inline void rk_iommu_unmask_irq(struct device *dev)
{
}
#endif /* CONFIG_IOMMU_API */

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain: The IOMMU domain to perform the mapping
 * @iova: The start address to map the buffer
 * @sgt: The sg_table object describing the buffer
 * @prot: IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 *
 * Return: whatever iommu_map_sg() returns for @sgt's original (pre-DMA-map)
 * entry list — presumably the number of bytes mapped, 0 on failure; confirm
 * against the iommu_map_sg() kernel-doc.
 */
static inline size_t iommu_map_sgtable(struct iommu_domain *domain, unsigned long iova, struct sg_table *sgt, int prot)
{
	/* orig_nents (not nents) on purpose: map the CPU-side entry list. */
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
}

/* debugfs support: real implementation when CONFIG_IOMMU_DEBUGFS, else a no-op. */
#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void)
{
}
#endif

#endif /* __LINUX_IOMMU_H */