Lines matching refs:domain (Linux kernel, drivers/iommu/dma-iommu.c)
75 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
76 * @domain: IOMMU domain to prepare for DMA-API usage
79 * callback when domain->type == IOMMU_DOMAIN_DMA.
81 int iommu_get_dma_cookie(struct iommu_domain *domain)
83 if (domain->iova_cookie) {
87 domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
88 if (!domain->iova_cookie) {
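The usual caller of iommu_get_dma_cookie() is an IOMMU driver's domain_alloc() path, as the kernel-doc above says. A minimal sketch under that assumption; my_domain and my_domain_alloc are illustrative names, not identifiers from this file:

#include <linux/iommu.h>
#include <linux/dma-iommu.h>
#include <linux/slab.h>

struct my_domain {                              /* hypothetical driver-private wrapper */
        struct iommu_domain domain;
};

static struct iommu_domain *my_domain_alloc(unsigned type)
{
        struct my_domain *md = kzalloc(sizeof(*md), GFP_KERNEL);

        if (!md)
                return NULL;

        /* DMA domains carry an IOVA cookie; acquire it at allocation time */
        if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(&md->domain)) {
                kfree(md);
                return NULL;
        }

        return &md->domain;
}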
98 * @domain: IOMMU domain to prepare
103 * this to initialise their own domain appropriately. Users should reserve a
106 * used by the devices attached to @domain.
108 int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
112 if (domain->type != IOMMU_DOMAIN_UNMANAGED) {
116 if (domain->iova_cookie) {
126 domain->iova_cookie = cookie;
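For unmanaged domains (e.g. ones owned by VFIO) there is no IOVA allocator, so iommu_get_msi_cookie() instead carves out a fixed window that the DMA layer may use for MSI doorbell pages. A hedged sketch; MY_MSI_IOVA_BASE is an assumed, platform-specific address chosen for illustration, not a constant defined here:

#include <linux/iommu.h>
#include <linux/dma-iommu.h>

#define MY_MSI_IOVA_BASE        0x8000000UL     /* assumption for the sketch */

static int my_reserve_msi_window(struct iommu_domain *domain)
{
        /* Only valid on IOMMU_DOMAIN_UNMANAGED, before the domain is in use */
        return iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE);
}

The caller must ensure its own mappings never overlap the reserved window, since MSI doorbell pages will be mapped there on demand.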
132 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
133 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
138 void iommu_put_dma_cookie(struct iommu_domain *domain)
140 struct iommu_dma_cookie *cookie = domain->iova_cookie;
157 domain->iova_cookie = NULL;
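The matching teardown, continuing the hypothetical my_domain sketch above: iommu_put_dma_cookie() is safe to call whether the domain holds an IOVA cookie, an MSI cookie, or none at all, so a driver's domain_free() can call it unconditionally.

static void my_domain_free(struct iommu_domain *domain)
{
        struct my_domain *md = container_of(domain, struct my_domain, domain);

        /* Releases the IOVA domain, any flush queue, and cached MSI pages */
        iommu_put_dma_cookie(domain);
        kfree(md);
}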
248 static int iova_reserve_iommu_regions(struct device *dev, struct iommu_domain *domain)
250 struct iommu_dma_cookie *cookie = domain->iova_cookie;
292 struct iommu_domain *domain;
295 domain = cookie->fq_domain;
300 domain->ops->flush_iotlb_all(domain);
304 * iommu_dma_init_domain - Initialise a DMA mapping domain
305 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
308 * @dev: Device the domain is being initialised for
312 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
315 static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size, struct device *dev)
317 struct iommu_dma_cookie *cookie = domain->iova_cookie;
329 order = __ffs(domain->pgsize_bitmap);
332 /* Check the domain allows at least some access to the device... */
333 if (domain->geometry.force_aperture) {
334 if (base > domain->geometry.aperture_end || base + size <= domain->geometry.aperture_start) {
339 base_pfn = max_t(unsigned long, base_pfn, domain->geometry.aperture_start >> order);
342 /* start_pfn is always nonzero for an already-initialised domain */
345 pr_warn("Incompatible range for DMA domain\n");
354 if (!cookie->fq_domain && !iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
358 cookie->fq_domain = domain;
366 return iova_reserve_iommu_regions(dev, domain);
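The order/base_pfn arithmetic at lines 329 and 339 just derives the IOVA granule from the smallest page size the domain supports. A worked example, assuming linux/bitops.h for __ffs(); the helper name is illustrative:

#include <linux/bitops.h>
#include <linux/iommu.h>

static unsigned long example_iova_granule_order(struct iommu_domain *domain)
{
        /*
         * e.g. pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G: the lowest set bit
         * is bit 12, so the granule is 4KiB, base must be 4KiB-aligned,
         * and every IOVA the allocator hands out is a multiple of 4KiB.
         */
        return __ffs(domain->pgsize_bitmap);
}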
369 static int iommu_dma_deferred_attach(struct device *dev, struct iommu_domain *domain)
371 const struct iommu_ops *ops = domain->ops;
377 if (unlikely(ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))) {
378 return iommu_attach_device(domain, dev);
389 struct iommu_domain *domain;
394 domain = iommu_get_domain_for_dev(dev);
395 if (!domain || !domain->iova_cookie) {
399 cookie = domain->iova_cookie;
418 struct iommu_domain *domain;
421 domain = iommu_get_domain_for_dev(dev);
422 if (!domain || !domain->iova_cookie) {
426 iovad = &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
467 static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, size_t size, u64 dma_limit, struct device *dev)
469 struct iommu_dma_cookie *cookie = domain->iova_cookie;
492 if (domain->geometry.force_aperture) {
493 dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
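The clamp at line 493 means a device's DMA mask is only an upper bound: the domain's aperture can restrict it further, so even a 64-bit capable device never receives an IOVA the hardware cannot translate. A sketch of the effective limit, assuming linux/dma-mapping.h; example_effective_dma_limit is an illustrative helper, not part of this file:

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>

static u64 example_effective_dma_limit(struct device *dev,
                                       struct iommu_domain *domain)
{
        u64 limit = dma_get_mask(dev);          /* e.g. DMA_BIT_MASK(64) */

        /* Respect the translatable aperture as well as the device mask */
        if (domain->geometry.force_aperture)
                limit = min(limit, (u64)domain->geometry.aperture_end);

        return limit;
}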
524 struct iommu_domain *domain = iommu_get_dma_domain(dev);
525 struct iommu_dma_cookie *cookie = domain->iova_cookie;
535 unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
539 iommu_iotlb_sync(domain, &iotlb_gather);
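Lines 524-539 use the batched-invalidation idiom: iommu_unmap_fast() tears down page tables without flushing, and the queued invalidations are issued once by iommu_iotlb_sync(). A stand-alone sketch of the same pattern (example_unmap_batched is an illustrative name):

#include <linux/iommu.h>

static void example_unmap_batched(struct iommu_domain *domain,
                                  unsigned long iova, size_t size)
{
        struct iommu_iotlb_gather gather;
        size_t unmapped;

        iommu_iotlb_gather_init(&gather);

        /* Unmap without flushing; invalidations accumulate in 'gather' */
        unmapped = iommu_unmap_fast(domain, iova, size, &gather);

        /* One IOTLB sync for the whole range instead of per-page flushes */
        if (unmapped)
                iommu_iotlb_sync(domain, &gather);
}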
546 struct iommu_domain *domain = iommu_get_dma_domain(dev);
547 struct iommu_dma_cookie *cookie = domain->iova_cookie;
552 if (unlikely(iommu_dma_deferred_attach(dev, domain))) {
558 iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
563 if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
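Device drivers never call __iommu_dma_map() directly: a driver behind an IOMMU simply uses the DMA API, and a streaming mapping such as dma_map_single() can end up in the path above. A consumer-side sketch, assuming buf/len are a driver's own kernel buffer:

#include <linux/dma-mapping.h>

static int example_stream_to_device(struct device *dev, void *buf, size_t len)
{
        /* Allocates an IOVA and maps it over buf's physical pages */
        dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, dma))
                return -ENOMEM;

        /* ... hand 'dma' to the hardware and wait for completion ... */

        dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
        return 0;
}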
656 struct iommu_domain *domain = iommu_get_dma_domain(dev);
657 struct iommu_dma_cookie *cookie = domain->iova_cookie;
661 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
669 if (unlikely(iommu_dma_deferred_attach(dev, domain))) {
691 iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
707 if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot) < size) {
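The remap path above (lines 656-707) is why, on an IOMMU-backed device, dma_alloc_coherent() may be satisfied without physically contiguous memory: pages are gathered into a scatterlist and mapped contiguously in IOVA space. A consumer-side sketch with an assumed 64KiB buffer size:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int example_coherent_ring(struct device *dev)
{
        dma_addr_t handle;
        void *ring;

        /* Possibly scattered pages, but one contiguous IOVA range */
        ring = dma_alloc_coherent(dev, SZ_64K, &handle, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        /* ... program 'handle' into the device's ring-base register ... */

        dma_free_coherent(dev, SZ_64K, ring, handle);
        return 0;
}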
910 struct iommu_domain *domain = iommu_get_dma_domain(dev);
911 struct iommu_dma_cookie *cookie = domain->iova_cookie;
920 if (unlikely(iommu_dma_deferred_attach(dev, domain))) {
968 iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
977 if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len) {
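iommu_dma_map_sg() (lines 910-977) allocates a single IOVA region for the whole scatterlist and maps the segments back-to-back, which is why dma_map_sg() can hand back fewer, merged segments than it was given. A consumer-side sketch, assuming pages[] was already allocated by the caller:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/mm.h>

static int example_map_pages(struct device *dev, struct page **pages,
                             unsigned int npages)
{
        struct sg_table sgt;
        int nents;

        if (sg_alloc_table_from_pages(&sgt, pages, npages, 0,
                                      (unsigned long)npages * PAGE_SIZE,
                                      GFP_KERNEL))
                return -ENOMEM;

        nents = dma_map_sg(dev, sgt.sgl, sgt.orig_nents, DMA_BIDIRECTIONAL);
        if (!nents) {
                sg_free_table(&sgt);
                return -ENOMEM;
        }

        /*
         * Segments may have been merged: program the device using
         * sg_dma_address()/sg_dma_len() over the first 'nents' entries only.
         */

        dma_unmap_sg(dev, sgt.sgl, sgt.orig_nents, DMA_BIDIRECTIONAL);
        sg_free_table(&sgt);
        return 0;
}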
1225 struct iommu_domain *domain = iommu_get_dma_domain(dev);
1227 return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
1253 * The IOMMU core code allocates the default DMA domain, which the underlying
1258 struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1260 if (!domain) {
1265 * The IOMMU core code allocates the default DMA domain, which the
1268 if (domain->type == IOMMU_DOMAIN_DMA) {
1269 if (iommu_dma_init_domain(domain, dma_base, size, dev)) {
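The function around lines 1253-1269 is invoked from architecture code once firmware parsing has told it which devices sit behind an IOMMU. A trimmed-down sketch of such a caller, under the assumption that iommu_setup_dma_ops() in this version still takes the dma_base/size pair seen at line 1269; the helper name is illustrative:

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/dma-iommu.h>

static void example_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *iommu)
{
        /* Only devices actually behind an IOMMU get the IOMMU DMA ops */
        if (iommu)
                iommu_setup_dma_ops(dev, dma_base, size);
}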
1281 struct iommu_domain *domain)
1283 struct iommu_dma_cookie *cookie = domain->iova_cookie;
1297 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
1302 if (iommu_map(domain, iova, msi_addr, size, prot)) {
1322 struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1326 if (!domain || !domain->iova_cookie) {
1337 msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
1351 const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1355 if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) {
1360 msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
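The consumers of the two exported helpers above are MSI irqchip drivers (the GICv3 ITS, for example): iommu_dma_prepare_msi() maps the doorbell page once at IRQ setup time, where sleeping and allocating are allowed, and iommu_dma_compose_msi_msg() later rewrites the composed message so the address the device writes is the IOVA rather than the physical doorbell. A condensed, hypothetical irqchip sketch; my_doorbell_phys and the callback names are assumptions for illustration:

#include <linux/kernel.h>
#include <linux/dma-iommu.h>
#include <linux/irq.h>
#include <linux/msi.h>

static phys_addr_t my_doorbell_phys;            /* assumed doorbell address */

/* Called while the IRQ is being set up, before it is ever composed */
static int my_msi_prepare(struct msi_desc *desc)
{
        return iommu_dma_prepare_msi(desc, my_doorbell_phys);
}

/* irq_chip .irq_compose_msi_msg() callback */
static void my_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
        msg->address_hi = upper_32_bits(my_doorbell_phys);
        msg->address_lo = lower_32_bits(my_doorbell_phys);
        msg->data = d->hwirq;

        /* Swap the physical doorbell address for the device-visible IOVA */
        iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
}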