// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

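/*
 * Example (hypothetical driver, not part of this file): a minimal sketch of
 * how an IOMMU driver would pair the cookie helpers in its domain_alloc and
 * domain_free callbacks, as suggested by the kernel-doc above. "struct
 * my_domain" and the to_my_domain() container_of() helper are assumed
 * driver-private names.
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct my_domain *md = kzalloc(sizeof(*md), GFP_KERNEL);
 *
 *		if (!md)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&md->domain)) {
 *			kfree(md);
 *			return NULL;
 *		}
 *		return &md->domain;
 *	}
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(to_my_domain(domain));
 *	}
 */
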
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

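/*
 * Example (hypothetical caller, not part of this file): a minimal sketch of
 * code that owns an unmanaged domain (a VFIO-style user of the IOMMU API)
 * but still wants MSI doorbells remapped. MY_MSI_IOVA_BASE is an assumed
 * constant naming an IOVA range the caller has reserved for MSI mappings;
 * error handling is condensed to freeing the domain.
 *
 *	#define MY_MSI_IOVA_BASE	0x8000000
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain)
 *		return -ENOMEM;
 *
 *	ret = iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE);
 *	if (!ret)
 *		ret = iommu_attach_device(domain, dev);
 *	if (ret)
 *		iommu_domain_free(domain);
 */
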
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{

	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);

}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

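/*
 * Example (hypothetical driver, not part of this file): a minimal sketch of
 * a .get_resv_regions callback that delegates the generic reservations to
 * the helper above and then adds its own software-managed MSI window.
 * MY_SW_MSI_BASE and MY_SW_MSI_SIZE are assumed driver constants.
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		iommu_dma_get_resv_regions(dev, head);
 *
 *		region = iommu_alloc_resv_region(MY_SW_MSI_BASE, MY_SW_MSI_SIZE,
 *						 IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO,
 *						 IOMMU_RESV_SW_MSI);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *	}
 */
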
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else if (end < start) {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev,
				"Failed to reserve IOVA [%pa-%pa]\n",
				&start, &end);
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				 domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
					  NULL))
			pr_warn("iova flush queue initialization failed\n");
		else
			cookie->fq_domain = domain;
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}

static int iommu_dma_deferred_attach(struct device *dev,
		struct iommu_domain *domain)
{
	const struct iommu_ops *ops = domain->ops;

	if (!is_kdump_kernel())
		return 0;

	if (unlikely(ops->is_attach_deferred &&
			ops->is_attach_deferred(domain, dev)))
		return iommu_attach_device(domain, dev);

	return 0;
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	/* It makes no sense to muck about with huge pages */
	gfp &= ~__GFP_COMP;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @prot: pgprot_t to use for the remapped mapping
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}

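/*
 * Example (illustrative only, not part of this file): once iommu_dma_ops is
 * installed for a device (see iommu_setup_dma_ops() below), an ordinary
 * streaming mapping in a consumer driver ends up in iommu_dma_map_page()
 * above via the dma_map_ops table. "buf" and "len" are assumed caller
 * variables.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */
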
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
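/*
 * Worked example (illustrative numbers only, assuming a 4K IOVA granule):
 * a segment with s->offset = 0x1a00 and s->length = 0x3000 has
 * s_iova_off = 0xa00. iommu_dma_map_sg() stashes 0xa00 in sg_dma_address()
 * and 0x3000 in sg_dma_len(), then rounds the segment to offset 0x1000 and
 * length iova_align(0x3a00) = 0x4000 for mapping. If the whole list is then
 * mapped at IOVA base X, __finalise_sg() restores offset 0x1a00 and length
 * 0x3000, reports the segment's DMA address as X + 0xa00, and advances the
 * running IOVA by the aligned 0x4000 before handling the next segment.
 */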
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return 0;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
					   gfp, NULL);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

#ifdef CONFIG_DMA_REMAP
static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
{
	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;

		page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
		if (!page)
			return NULL;
		return page_address(page);
	}

	return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
				     PAGE_KERNEL, 0);
}

static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}
#else
#define iommu_dma_alloc_noncoherent	NULL
#define iommu_dma_free_noncoherent	NULL
#endif /* CONFIG_DMA_REMAP */

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return __iommu_dma_mmap(pages, size, vma);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
	.alloc_noncoherent	= iommu_dma_alloc_noncoherent,
	.free_noncoherent	= iommu_dma_free_noncoherent,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_page:
	kfree(msi_page);
	return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;
	static DEFINE_MUTEX(msi_prepare_lock); /* see below */

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	/*
	 * In fact the whole prepare operation should already be serialised by
	 * irq_domain_mutex further up the callchain, but that's pretty subtle
	 * on its own, so consider this locking as failsafe documentation...
	 */
	mutex_lock(&msi_prepare_lock);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	mutex_unlock(&msi_prepare_lock);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}

static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);
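
/*
 * Usage note (illustrative, not part of this file): the two MSI hooks above
 * are intended to be driven by an MSI irqchip rather than by device drivers.
 * Roughly, at IRQ allocation time the irqchip's msi_prepare path calls
 * iommu_dma_prepare_msi(desc, doorbell_phys) so the doorbell page gets an
 * IOVA mapping, and when the message is written its compose path calls
 * iommu_dma_compose_msi_msg(desc, msg) to rewrite msg->address_hi/lo with
 * that IOVA. "doorbell_phys" is an assumed name for the irqchip's doorbell
 * physical address.
 */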