// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>

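/*
 * Prefer a 32-bit mask when first attempting PCI IOVA allocation, so that
 * devices get single address cycle (SAC) addresses where possible; see
 * iommu_dma_alloc_iova() below.
 */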
#define DMA_IOMMU_BIT_MASK_VALUE 32

struct iommu_dma_msi_page {
    struct list_head list;
    dma_addr_t iova;
    phys_addr_t phys;
};

enum iommu_dma_cookie_type {
    IOMMU_DMA_IOVA_COOKIE,
    IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
    enum iommu_dma_cookie_type type;
    union {
        /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
        struct iova_domain iovad;
        /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
        dma_addr_t msi_iova;
    };
    struct list_head msi_page_list;

    /* Domain for flush queue callback; NULL if flush queue not in use */
    struct iommu_domain *fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
    if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
        return cookie->iovad.granule;
    }
    return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
    struct iommu_dma_cookie *cookie;

    cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
    if (cookie) {
        INIT_LIST_HEAD(&cookie->msi_page_list);
        cookie->type = type;
    }
    return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
    if (domain->iova_cookie) {
        return -EEXIST;
    }

    domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
    if (!domain->iova_cookie) {
        return -ENOMEM;
    }

    return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
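
/*
 * Illustrative sketch (not code from this file): a hypothetical driver's
 * domain_alloc callback would wire the cookie up roughly like so:
 *
 *    struct iommu_domain *my_domain_alloc(unsigned int type)
 *    {
 *        struct iommu_domain *domain = my_alloc_domain(type);
 *
 *        if (domain && type == IOMMU_DOMAIN_DMA &&
 *            iommu_get_dma_cookie(domain)) {
 *            my_free_domain(domain);
 *            return NULL;
 *        }
 *        return domain;
 *    }
 *
 * my_domain_alloc()/my_free_domain() are placeholders for the driver's own
 * allocation helpers.
 */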

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
    struct iommu_dma_cookie *cookie;

    if (domain->type != IOMMU_DOMAIN_UNMANAGED) {
        return -EINVAL;
    }

    if (domain->iova_cookie) {
        return -EEXIST;
    }

    cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
    if (!cookie) {
        return -ENOMEM;
    }

    cookie->msi_iova = base;
    domain->iova_cookie = cookie;
    return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
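
/*
 * Illustrative sketch (hypothetical base address): a VFIO-style user that
 * owns an unmanaged domain might set aside an MSI window like this:
 *
 *    #define MY_MSI_BASE 0x8000000  (an IOVA the caller keeps out of its
 *                                    own allocator)
 *
 *    ret = iommu_get_msi_cookie(domain, MY_MSI_BASE);
 *
 * The region starting at MY_MSI_BASE must be large enough for one PAGE_SIZE
 * mapping per distinct MSI doorbell used by the attached devices.
 */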

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
    struct iommu_dma_cookie *cookie = domain->iova_cookie;
    struct iommu_dma_msi_page *msi, *tmp;

    if (!cookie) {
        return;
    }

    if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
        put_iova_domain(&cookie->iovad);
    }

    list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list)
    {
        list_del(&msi->list);
        kfree(msi);
    }
    kfree(cookie);
    domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI-based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
    if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode)) {
        iort_iommu_msi_get_resv_regions(dev, list);
    }
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie, phys_addr_t start, phys_addr_t end)
{
    struct iova_domain *iovad = &cookie->iovad;
    struct iommu_dma_msi_page *msi_page;
    int i, num_pages;

    start -= iova_offset(iovad, start);
    num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

    for (i = 0; i < num_pages; i++) {
        msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
        if (!msi_page) {
            return -ENOMEM;
        }

        msi_page->phys = start;
        msi_page->iova = start;
        INIT_LIST_HEAD(&msi_page->list);
        list_add(&msi_page->list, &cookie->msi_page_list);
        start += iovad->granule;
    }

    return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev, struct iova_domain *iovad)
{
    struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
    struct resource_entry *window;
    unsigned long lo, hi;
    phys_addr_t start = 0, end;

    resource_list_for_each_entry(window, &bridge->windows)
    {
        if (resource_type(window->res) != IORESOURCE_MEM) {
            continue;
        }

        lo = iova_pfn(iovad, window->res->start - window->offset);
        hi = iova_pfn(iovad, window->res->end - window->offset);
        reserve_iova(iovad, lo, hi);
    }

    /* Get reserved DMA windows from the host bridge */
    resource_list_for_each_entry(window, &bridge->dma_ranges)
    {
        end = window->res->start - window->offset;
    resv_iova:
        if (end > start) {
            lo = iova_pfn(iovad, start);
            hi = iova_pfn(iovad, end);
            reserve_iova(iovad, lo, hi);
        } else if (end < start) {
            /* The dma_ranges list should be sorted */
            dev_err(&dev->dev, "Failed to reserve IOVA [%pa-%pa]\n", &start, &end);
            return -EINVAL;
        }

        start = window->res->end - window->offset + 1;
        /* If window is the last entry */
        if (window->node.next == &bridge->dma_ranges && end != ~(phys_addr_t)0) {
            end = ~(phys_addr_t)0;
            goto resv_iova;
        }
    }

    return 0;
}

static int iova_reserve_iommu_regions(struct device *dev, struct iommu_domain *domain)
{
    struct iommu_dma_cookie *cookie = domain->iova_cookie;
    struct iova_domain *iovad = &cookie->iovad;
    struct iommu_resv_region *region;
    LIST_HEAD(resv_regions);
    int ret = 0;

    if (dev_is_pci(dev)) {
        ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
        if (ret) {
            return ret;
        }
    }

    iommu_get_resv_regions(dev, &resv_regions);
    list_for_each_entry(region, &resv_regions, list)
    {
        unsigned long lo, hi;

        /* We ARE the software that manages these! */
        if (region->type == IOMMU_RESV_SW_MSI) {
            continue;
        }

        lo = iova_pfn(iovad, region->start);
        hi = iova_pfn(iovad, region->start + region->length - 1);
        reserve_iova(iovad, lo, hi);

        if (region->type == IOMMU_RESV_MSI) {
            ret = cookie_init_hw_msi_region(cookie, region->start, region->start + region->length);
        }
        if (ret) {
            break;
        }
    }
    iommu_put_resv_regions(dev, &resv_regions);

    return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
    struct iommu_dma_cookie *cookie;
    struct iommu_domain *domain;

    cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
    domain = cookie->fq_domain;
    /*
     * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
     * implies that ops->flush_iotlb_all must be non-NULL.
     */
    domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size, struct device *dev)
{
    struct iommu_dma_cookie *cookie = domain->iova_cookie;
    unsigned long order, base_pfn;
    struct iova_domain *iovad;
    int attr;

    if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) {
        return -EINVAL;
    }

    iovad = &cookie->iovad;

    /* Use the smallest supported page size for IOVA granularity */
    order = __ffs(domain->pgsize_bitmap);
    base_pfn = max_t(unsigned long, 1, base >> order);

    /* Check the domain allows at least some access to the device... */
    if (domain->geometry.force_aperture) {
        if (base > domain->geometry.aperture_end || base + size <= domain->geometry.aperture_start) {
            pr_warn("specified DMA range outside IOMMU capability\n");
            return -EFAULT;
        }
        /* ...then finally give it a kicking to make sure it fits */
        base_pfn = max_t(unsigned long, base_pfn, domain->geometry.aperture_start >> order);
    }

    /* start_pfn is always nonzero for an already-initialised domain */
    if (iovad->start_pfn) {
        if (1UL << order != iovad->granule || base_pfn != iovad->start_pfn) {
            pr_warn("Incompatible range for DMA domain\n");
            return -EFAULT;
        }

        return 0;
    }

    init_iova_domain(iovad, 1UL << order, base_pfn);

    if (!cookie->fq_domain && !iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
        if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL)) {
            pr_warn("iova flush queue initialization failed\n");
        } else {
            cookie->fq_domain = domain;
        }
    }

    if (!dev) {
        return 0;
    }

    return iova_reserve_iommu_regions(dev, domain);
}

static int iommu_dma_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
    const struct iommu_ops *ops = domain->ops;

    if (!is_kdump_kernel()) {
        return 0;
    }

    if (unlikely(ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))) {
        return iommu_attach_device(domain, dev);
    }

    return 0;
}

/*
 * Should be called prior to using the DMA API.
 */
int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base, u64 size)
{
    struct iommu_domain *domain;
    struct iommu_dma_cookie *cookie;
    struct iova_domain *iovad;
    unsigned long pfn_lo, pfn_hi;

    domain = iommu_get_domain_for_dev(dev);
    if (!domain || !domain->iova_cookie) {
        return -EINVAL;
    }

    cookie = domain->iova_cookie;
    iovad = &cookie->iovad;

    /* iova will be freed automatically by put_iova_domain() */
    pfn_lo = iova_pfn(iovad, base);
    pfn_hi = iova_pfn(iovad, base + size - 1);
    if (!reserve_iova(iovad, pfn_lo, pfn_hi)) {
        return -EINVAL;
    }

    return 0;
}
EXPORT_SYMBOL(iommu_dma_reserve_iova);
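
/*
 * Illustrative sketch (hypothetical values): a driver that must keep the
 * IOVA range backing a fixed 64K window out of the allocator could do:
 *
 *    ret = iommu_dma_reserve_iova(dev, 0x10000000, SZ_64K);
 *    if (ret)
 *        dev_err(dev, "failed to reserve IOVA window\n");
 *
 * Reserved ranges are released only when the domain's iova_domain is torn
 * down by put_iova_domain(); there is no explicit unreserve call here.
 */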

/*
 * Should be called prior to using the DMA API.
 */
int iommu_dma_enable_best_fit_algo(struct device *dev)
{
    struct iommu_domain *domain;
    struct iova_domain *iovad;

    domain = iommu_get_domain_for_dev(dev);
    if (!domain || !domain->iova_cookie) {
        return -EINVAL;
    }

    iovad = &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
    iovad->best_fit = true;
    return 0;
}
EXPORT_SYMBOL(iommu_dma_enable_best_fit_algo);

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent, unsigned long attrs)
{
    int prot = coherent ? IOMMU_CACHE : 0;

    if (attrs & DMA_ATTR_PRIVILEGED) {
        prot |= IOMMU_PRIV;
    }
    if (attrs & DMA_ATTR_SYS_CACHE_ONLY) {
        prot |= IOMMU_SYS_CACHE_ONLY;
    }
    if (attrs & DMA_ATTR_SYS_CACHE_ONLY_NWA) {
        prot |= IOMMU_SYS_CACHE_ONLY_NWA;
    }

    switch (dir) {
        case DMA_BIDIRECTIONAL:
            return prot | IOMMU_READ | IOMMU_WRITE;
        case DMA_TO_DEVICE:
            return prot | IOMMU_READ;
        case DMA_FROM_DEVICE:
            return prot | IOMMU_WRITE;
        default:
            return 0;
    }
}
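
/*
 * Worked example: for a coherent device reading a buffer the CPU wrote
 * (dir == DMA_TO_DEVICE, coherent == true, no extra attrs), the result is
 * IOMMU_CACHE | IOMMU_READ - the device may read but not write the pages.
 */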

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, size_t size, u64 dma_limit, struct device *dev)
{
    struct iommu_dma_cookie *cookie = domain->iova_cookie;
    struct iova_domain *iovad = &cookie->iovad;
    unsigned long shift, iova_len, iova = 0;

    if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
        cookie->msi_iova += size;
        return cookie->msi_iova - size;
    }

    shift = iova_shift(iovad);
    iova_len = size >> shift;
    /*
     * Freeing non-power-of-two-sized allocations back into the IOVA caches
     * will come back to bite us badly, so we have to waste a bit of space
     * rounding up anything cacheable to make sure that can't happen. The
     * order of the unadjusted size will still match upon freeing.
     */
    if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1))) {
        iova_len = roundup_pow_of_two(iova_len);
    }

    dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

    if (domain->geometry.force_aperture) {
        dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
    }

    /* Try to get PCI devices a SAC address */
    if (dma_limit > DMA_BIT_MASK(DMA_IOMMU_BIT_MASK_VALUE) && dev_is_pci(dev)) {
        iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(DMA_IOMMU_BIT_MASK_VALUE) >> shift, false);
    }

    if (!iova) {
        iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
    }

    return (dma_addr_t)iova << shift;
}
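
/*
 * Example of the rounding above, assuming a 4K granule: a 20K request is
 * iova_len == 5 granules, rounded up to 8 so the range fits an order-3 IOVA
 * cache entry; three trailing granules are wasted, but the allocation can
 * then be recycled through the caches safely.
 */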

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie, dma_addr_t iova, size_t size)
{
    struct iova_domain *iovad = &cookie->iovad;

    /* The MSI case is only ever cleaning up its most recent allocation */
    if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
        cookie->msi_iova -= size;
    } else if (cookie->fq_domain) { /* non-strict mode */
        queue_iova(iovad, iova_pfn(iovad, iova), size >> iova_shift(iovad), 0);
    } else {
        free_iova_fast(iovad, iova_pfn(iovad, iova), size >> iova_shift(iovad));
    }
}

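/*
 * Core unmap helper: strips the sub-granule offset, unmaps the aligned
 * region, syncs the IOTLB immediately in strict mode (no flush queue), and
 * finally returns the IOVA range to the allocator above.
 */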
static void iommu_dma_unmap_ext(struct device *dev, dma_addr_t dma_addr, size_t size)
{
    struct iommu_domain *domain = iommu_get_dma_domain(dev);
    struct iommu_dma_cookie *cookie = domain->iova_cookie;
    struct iova_domain *iovad = &cookie->iovad;
    size_t iova_off = iova_offset(iovad, dma_addr);
    struct iommu_iotlb_gather iotlb_gather;
    size_t unmapped;

    dma_addr -= iova_off;
    size = iova_align(iovad, size + iova_off);
    iommu_iotlb_gather_init(&iotlb_gather);

    unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
    WARN_ON(unmapped != size);

    if (!cookie->fq_domain) {
        iommu_iotlb_sync(domain, &iotlb_gather);
    }
    iommu_dma_free_iova(cookie, dma_addr, size);
}

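/*
 * Core map helper: allocates a granule-aligned IOVA range below @dma_mask
 * and maps @phys into it, returning the IOVA plus the original sub-granule
 * offset, or DMA_MAPPING_ERROR on failure.
 */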
static dma_addr_t iommu_dma_map_ext(struct device *dev, phys_addr_t phys, size_t size, int prot, u64 dma_mask)
{
    struct iommu_domain *domain = iommu_get_dma_domain(dev);
    struct iommu_dma_cookie *cookie = domain->iova_cookie;
    struct iova_domain *iovad = &cookie->iovad;
    size_t iova_off = iova_offset(iovad, phys);
    dma_addr_t iova;

    if (unlikely(iommu_dma_deferred_attach(dev, domain))) {
        return DMA_MAPPING_ERROR;
    }

    size = iova_align(iovad, size + iova_off);

    iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
    if (!iova) {
        return DMA_MAPPING_ERROR;
    }

    if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
        iommu_dma_free_iova(cookie, iova, size);
        return DMA_MAPPING_ERROR;
    }
    return iova + iova_off;
}

static void iommu_dma_free_pages_ext(struct page **pages, int count)
{
    while (count--) {
        __free_page(pages[count]);
    }
    kvfree(pages);
}

static struct page **iommu_dma_alloc_pages_ext(struct device *dev, unsigned int count, unsigned long order_mask,
                                               gfp_t gfp)
{
    struct page **pages;
    unsigned int i = 0, nid = dev_to_node(dev);

    order_mask &= (2U << MAX_ORDER) - 1;
    if (!order_mask) {
        return NULL;
    }

    pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
    if (!pages) {
        return NULL;
    }

    /* IOMMU can map any pages, so highmem can also be used here */
    gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

    /* It makes no sense to muck about with huge pages */
    gfp &= ~__GFP_COMP;

    while (count) {
        struct page *page = NULL;
        unsigned int order_size;

        /*
         * Higher-order allocations are a convenience rather
         * than a necessity, hence using __GFP_NORETRY until
         * falling back to minimum-order allocations.
         */
        for (order_mask &= (2U << __fls(count)) - 1; order_mask; order_mask &= ~order_size) {
            unsigned int order = __fls(order_mask);
            gfp_t alloc_flags = gfp;

            order_size = 1U << order;
            if (order_mask > (unsigned long)order_size) {
                alloc_flags |= __GFP_NORETRY;
            }
            page = alloc_pages_node(nid, alloc_flags, order);
            if (!page) {
                continue;
            }
            if (order) {
                split_page(page, order);
            }
            break;
        }
        if (!page) {
            iommu_dma_free_pages_ext(pages, i);
            return NULL;
        }
        count -= order_size;
        while (order_size--) {
            pages[i++] = page++;
        }
    }
    return pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *     attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @prot: pgprot_t to use for the remapped mapping
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
                                   unsigned long attrs)
{
    struct iommu_domain *domain = iommu_get_dma_domain(dev);
    struct iommu_dma_cookie *cookie = domain->iova_cookie;
    struct iova_domain *iovad = &cookie->iovad;
    bool coherent = dev_is_dma_coherent(dev);
    int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
    unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
    struct page **pages;
    struct sg_table sgt;
    dma_addr_t iova;
    void *vaddr;

    *dma_handle = DMA_MAPPING_ERROR;

    if (unlikely(iommu_dma_deferred_attach(dev, domain))) {
        return NULL;
    }

    min_size = alloc_sizes & -alloc_sizes;
    if (min_size < PAGE_SIZE) {
        min_size = PAGE_SIZE;
        alloc_sizes |= PAGE_SIZE;
    } else {
        size = ALIGN(size, min_size);
    }
    if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES) {
        alloc_sizes = min_size;
    }

    count = PAGE_ALIGN(size) >> PAGE_SHIFT;
    pages = iommu_dma_alloc_pages_ext(dev, count, alloc_sizes >> PAGE_SHIFT, gfp);
    if (!pages) {
        return NULL;
    }

    size = iova_align(iovad, size);
    iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
    if (!iova) {
        goto out_free_pages;
    }

    if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL)) {
        goto out_free_iova;
    }

    if (!(ioprot & IOMMU_CACHE)) {
        struct scatterlist *sg;
        int i;

        for_each_sg(sgt.sgl, sg, sgt.orig_nents, i) {
            arch_dma_prep_coherent(sg_page(sg), sg->length);
        }
    }

    if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot) < size) {
        goto out_free_sg;
    }

    vaddr = dma_common_pages_remap(pages, size, prot, __builtin_return_address(0));
    if (!vaddr) {
        goto out_unmap;
    }

    *dma_handle = iova;
    sg_free_table(&sgt);
    return vaddr;

out_unmap:
    iommu_dma_unmap_ext(dev, iova, size);
out_free_sg:
    sg_free_table(&sgt);
out_free_iova:
    iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
    iommu_dma_free_pages_ext(pages, count);
    return NULL;
}

/**
 * iommu_dma_mmap_ext - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc_remap()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int iommu_dma_mmap_ext(struct page **pages, size_t size, struct vm_area_struct *vma)
{
    return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                                          enum dma_data_direction dir)
{
    phys_addr_t phys;

    if (dev_is_dma_coherent(dev)) {
        return;
    }

    phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
    arch_sync_dma_for_cpu(phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                                             enum dma_data_direction dir)
{
    phys_addr_t phys;

    if (dev_is_dma_coherent(dev)) {
        return;
    }

    phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
    arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, int nelems,
                                      enum dma_data_direction dir)
{
    struct scatterlist *sg;
    int i;

    if (dev_is_dma_coherent(dev)) {
        return;
    }

    for_each_sg(sgl, sg, nelems, i) {
        arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
    }
}

static void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, int nelems,
                                         enum dma_data_direction dir)
{
    struct scatterlist *sg;
    int i;

    if (dev_is_dma_coherent(dev)) {
        return;
    }

    for_each_sg(sgl, sg, nelems, i) {
        arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
    }
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size,
                                     enum dma_data_direction dir, unsigned long attrs)
{
    phys_addr_t phys = page_to_phys(page) + offset;
    bool coherent = dev_is_dma_coherent(dev);
    int prot = dma_info_to_prot(dir, coherent, attrs);
    dma_addr_t dma_handle;

    dma_handle = iommu_dma_map_ext(dev, phys, size, prot, dma_get_mask(dev));
    if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && dma_handle != DMA_MAPPING_ERROR) {
        arch_sync_dma_for_device(phys, size, dir);
    }
    return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir,
                                 unsigned long attrs)
{
    if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
        iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
    }
    iommu_dma_unmap_ext(dev, dma_handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int finalise_sg_ext(struct device *dev, struct scatterlist *sg, int nents, dma_addr_t dma_addr)
{
    struct scatterlist *s, *cur = sg;
    unsigned long seg_mask = dma_get_seg_boundary(dev);
    unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
    int i, count = 0;

    for_each_sg(sg, s, nents, i)
    {
        /* Restore this segment's original unaligned fields first */
        unsigned int s_iova_off = sg_dma_address(s);
        unsigned int s_length = sg_dma_len(s);
        unsigned int s_iova_len = s->length;

        s->offset += s_iova_off;
        s->length = s_length;
        sg_dma_address(s) = DMA_MAPPING_ERROR;
        sg_dma_len(s) = 0;

        /*
         * Now fill in the real DMA data. If...
         * - there is a valid output segment to append to
         * - and this segment starts on an IOVA page boundary
         * - but doesn't fall at a segment boundary
         * - and wouldn't make the resulting output segment too long
         */
        if (cur_len && !s_iova_off && (dma_addr & seg_mask) && (max_len - cur_len >= s_length)) {
            /* ...then concatenate it with the previous one */
            cur_len += s_length;
        } else {
            /* Otherwise start the next output segment */
            if (i > 0) {
                cur = sg_next(cur);
            }
            cur_len = s_length;
            count++;

            sg_dma_address(cur) = dma_addr + s_iova_off;
        }

        sg_dma_len(cur) = cur_len;
        dma_addr += s_iova_len;

        if (s_length + s_iova_off < s_iova_len) {
            cur_len = 0;
        }
    }
    return count;
}
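
/*
 * Worked example of the merge rule above, assuming a 4K granule and no
 * restrictive segment boundary: two 4K segments mapped to contiguous IOVA
 * granules collapse into a single 8K DMA segment, whereas a segment whose
 * original offset was not granule-aligned (s_iova_off != 0) always starts a
 * fresh DMA segment so that the caller's offsets stay intact.
 */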

/*
 * If mapping failed, then just restore the original list,
 * while making sure the DMA fields are invalidated.
 */
static void invalidate_sg_ext(struct scatterlist *sg, int nents)
{
    struct scatterlist *s;
    int i;

    for_each_sg(sg, s, nents, i)
    {
        if (sg_dma_address(s) != DMA_MAPPING_ERROR) {
            s->offset += sg_dma_address(s);
        }
        if (sg_dma_len(s)) {
            s->length = sg_dma_len(s);
        }
        sg_dma_address(s) = DMA_MAPPING_ERROR;
        sg_dma_len(s) = 0;
    }
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir,
                            unsigned long attrs)
{
    struct iommu_domain *domain = iommu_get_dma_domain(dev);
    struct iommu_dma_cookie *cookie = domain->iova_cookie;
    struct iova_domain *iovad = &cookie->iovad;
    struct scatterlist *s, *prev = NULL;
    int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
    dma_addr_t iova;
    size_t iova_len = 0;
    unsigned long mask = dma_get_seg_boundary(dev);
    int i;

    if (unlikely(iommu_dma_deferred_attach(dev, domain))) {
        return 0;
    }

    if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
        iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
    }

    /*
     * Work out how much IOVA space we need, and align the segments to
     * IOVA granules for the IOMMU driver to handle. With some clever
     * trickery we can modify the list in-place, but reversibly, by
     * stashing the unaligned parts in the as-yet-unused DMA fields.
     */
    for_each_sg(sg, s, nents, i)
    {
        size_t s_iova_off = iova_offset(iovad, s->offset);
        size_t s_length = s->length;
        size_t pad_len = (mask - iova_len + 1) & mask;

        sg_dma_address(s) = s_iova_off;
        sg_dma_len(s) = s_length;
        s->offset -= s_iova_off;
        s_length = iova_align(iovad, s_length + s_iova_off);
        s->length = s_length;

        /*
         * Due to the alignment of our single IOVA allocation, we can
         * depend on these assumptions about the segment boundary mask:
         * - If mask size >= IOVA size, then the IOVA range cannot
         *   possibly fall across a boundary, so we don't care.
         * - If mask size < IOVA size, then the IOVA range must start
         *   exactly on a boundary, therefore we can lay things out
         *   based purely on segment lengths without needing to know
         *   the actual addresses beforehand.
         * - The mask must be a power of 2, so pad_len == 0 if
         *   iova_len == 0, thus we cannot dereference prev the first
         *   time through here (i.e. before it has a meaningful value).
         */
        if (pad_len && pad_len < s_length - 1) {
            prev->length += pad_len;
            iova_len += pad_len;
        }

        iova_len += s_length;
        prev = s;
    }

    iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
    if (!iova) {
        goto out_restore_sg;
    }

    /*
     * We'll leave any physical concatenation to the IOMMU driver's
     * implementation - it knows better than we do.
     */
    if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len) {
        goto out_free_iova;
    }

    return finalise_sg_ext(dev, sg, nents, iova);

out_free_iova:
    iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
    invalidate_sg_ext(sg, nents);
    return 0;
}
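
/*
 * Example of the stash-and-restore trick above, assuming a 4K granule: a
 * segment with offset 0x1200 and length 0x800 temporarily becomes offset
 * 0x1000, length 0x1000 for mapping, with the original 0x200 sub-granule
 * offset kept in sg_dma_address() and the original 0x800 length kept in
 * sg_dma_len(); finalise_sg_ext() (on success) or invalidate_sg_ext() (on
 * failure) then puts the caller-visible fields back.
 */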

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir,
                               unsigned long attrs)
{
    dma_addr_t start, end;
    struct scatterlist *tmp;
    int i;

    if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
        iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
    }

    /*
     * The scatterlist segments are mapped into a single
     * contiguous IOVA allocation, so this is incredibly easy.
     */
    start = sg_dma_address(sg);
    for_each_sg(sg_next(sg), tmp, nents - 1, i)
    {
        if (sg_dma_len(tmp) == 0) {
            break;
        }
        sg = tmp;
    }
    end = sg_dma_address(sg) + sg_dma_len(sg);
    iommu_dma_unmap_ext(dev, start, end - start);
}
10163d0407baSopenharmony_ci
10173d0407baSopenharmony_cistatic dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, size_t size, enum dma_data_direction dir,
10183d0407baSopenharmony_ci                                         unsigned long attrs)
10193d0407baSopenharmony_ci{
10203d0407baSopenharmony_ci    return iommu_dma_map_ext(dev, phys, size, dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO, dma_get_mask(dev));
10213d0407baSopenharmony_ci}
10223d0407baSopenharmony_ci
10233d0407baSopenharmony_cistatic void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir,
10243d0407baSopenharmony_ci                                     unsigned long attrs)
10253d0407baSopenharmony_ci{
10263d0407baSopenharmony_ci    iommu_dma_unmap_ext(dev, handle, size);
10273d0407baSopenharmony_ci}
10283d0407baSopenharmony_ci
10293d0407baSopenharmony_cistatic void iommu_dma_free_ext(struct device *dev, size_t size, void *cpu_addr)
10303d0407baSopenharmony_ci{
10313d0407baSopenharmony_ci    size_t alloc_size = PAGE_ALIGN(size);
10323d0407baSopenharmony_ci    int count = alloc_size >> PAGE_SHIFT;
10333d0407baSopenharmony_ci    struct page *page = NULL, **pages = NULL;
10343d0407baSopenharmony_ci
10353d0407baSopenharmony_ci    /* Non-coherent atomic allocation? Easy */
10363d0407baSopenharmony_ci    if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && dma_free_from_pool(dev, cpu_addr, alloc_size)) {
10373d0407baSopenharmony_ci        return;
10383d0407baSopenharmony_ci    }
10393d0407baSopenharmony_ci
    if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
        /*
         * If the address is remapped, then it's either non-coherent
         * or highmem CMA, or an iommu_dma_alloc_remap() construction.
         */
        pages = dma_common_find_pages(cpu_addr);
        if (!pages) {
            page = vmalloc_to_page(cpu_addr);
        }
        dma_common_free_remap(cpu_addr, alloc_size);
    } else {
        /* Lowmem means a coherent atomic or CMA allocation */
        page = virt_to_page(cpu_addr);
    }

    if (pages) {
        iommu_dma_free_pages_ext(pages, count);
    }
    if (page) {
        dma_free_contiguous(dev, page, alloc_size);
    }
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
    iommu_dma_unmap_ext(dev, handle, size);
    iommu_dma_free_ext(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size, struct page **pagep, gfp_t gfp, unsigned long attrs)
{
    bool coherent = dev_is_dma_coherent(dev);
    size_t alloc_size = PAGE_ALIGN(size);
    int node = dev_to_node(dev);
    struct page *page = NULL;
    void *cpu_addr;

    page = dma_alloc_contiguous(dev, alloc_size, gfp);
    if (!page) {
        page = alloc_pages_node(node, gfp, get_order(alloc_size));
    }
    if (!page) {
        return NULL;
    }

    if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
        pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

        cpu_addr = dma_common_contiguous_remap(page, alloc_size, prot, __builtin_return_address(0));
        if (!cpu_addr) {
            goto out_free_pages;
        }

        if (!coherent) {
            arch_dma_prep_coherent(page, size);
        }
    } else {
        cpu_addr = page_address(page);
    }

    *pagep = page;
    memset(cpu_addr, 0, alloc_size);
    return cpu_addr;
out_free_pages:
    dma_free_contiguous(dev, page, alloc_size);
    return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
    bool coherent = dev_is_dma_coherent(dev);
    int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
    struct page *page = NULL;
    void *cpu_addr;

    gfp |= __GFP_ZERO;

    if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) && !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
        return iommu_dma_alloc_remap(dev, size, handle, gfp, dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
    }

    if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !gfpflags_allow_blocking(gfp) && !coherent) {
        page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr, gfp, NULL);
    } else {
        cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
    }
    if (!cpu_addr) {
        return NULL;
    }

    *handle = iommu_dma_map_ext(dev, page_to_phys(page), size, ioprot, dev->coherent_dma_mask);
    if (*handle == DMA_MAPPING_ERROR) {
        iommu_dma_free_ext(dev, size, cpu_addr);
        return NULL;
    }

    return cpu_addr;
}
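
/*
 * Hypothetical caller sketch (illustrative only): consumers reach
 * iommu_dma_alloc()/iommu_dma_free() through dma_alloc_coherent() and
 * dma_free_coherent() once iommu_dma_ops below is installed for the device.
 * The function name is invented for the example.
 */
#if 0
static void example_coherent_alloc(struct device *dev)
{
    dma_addr_t dma;
    void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);

    if (cpu) {
        dma_free_coherent(dev, PAGE_SIZE, cpu, dma);
    }
}
#endif
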
#ifdef CONFIG_DMA_REMAP
static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle,
                                         enum dma_data_direction dir, gfp_t gfp)
{
    if (!gfpflags_allow_blocking(gfp)) {
        struct page *page;

        page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
        if (!page) {
            return NULL;
        }
        return page_address(page);
    }

    return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO, PAGE_KERNEL, 0);
}

static void iommu_dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle,
                                       enum dma_data_direction dir)
{
    iommu_dma_unmap_ext(dev, handle, size);
    iommu_dma_free_ext(dev, size, cpu_addr);
}
#else
#define iommu_dma_alloc_noncoherent NULL
#define iommu_dma_free_noncoherent NULL
#endif /* CONFIG_DMA_REMAP */

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr,
                          size_t size, unsigned long attrs)
{
    unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
    unsigned long pfn, off = vma->vm_pgoff;
    int ret;

    vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

    if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) {
        return ret;
    }

    if (off >= nr_pages || vma_pages(vma) > nr_pages - off) {
        return -ENXIO;
    }

    if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
        struct page **pages = dma_common_find_pages(cpu_addr);

        if (pages) {
            return iommu_dma_mmap_ext(pages, size, vma);
        }
        pfn = vmalloc_to_pfn(cpu_addr);
    } else {
        pfn = page_to_pfn(virt_to_page(cpu_addr));
    }

    return remap_pfn_range(vma, vma->vm_start, pfn + off, vma->vm_end - vma->vm_start, vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
                                 size_t size, unsigned long attrs)
{
    struct page *page;
    int ret;

    if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
        struct page **pages = dma_common_find_pages(cpu_addr);

        if (pages) {
            return sg_alloc_table_from_pages(sgt, pages, PAGE_ALIGN(size) >> PAGE_SHIFT, 0, size, GFP_KERNEL);
        }

        page = vmalloc_to_page(cpu_addr);
    } else {
        page = virt_to_page(cpu_addr);
    }

    ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
    if (!ret) {
        sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
    }
    return ret;
}

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
    struct iommu_domain *domain = iommu_get_dma_domain(dev);

    return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}
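
/*
 * Worked example (illustrative): if the smallest page size in
 * domain->pgsize_bitmap is 4 KiB (bit 12 set), __ffs() returns 12 and the
 * merge boundary is (1UL << 12) - 1 == 0xfff, i.e. callers may merge
 * scatter-gather elements so long as they line up at 4 KiB boundaries.
 */
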
static const struct dma_map_ops iommu_dma_ops = {
    .alloc = iommu_dma_alloc,
    .free = iommu_dma_free,
    .alloc_pages = dma_common_alloc_pages,
    .free_pages = dma_common_free_pages,
    .alloc_noncoherent = iommu_dma_alloc_noncoherent,
    .free_noncoherent = iommu_dma_free_noncoherent,
    .mmap = iommu_dma_mmap,
    .get_sgtable = iommu_dma_get_sgtable,
    .map_page = iommu_dma_map_page,
    .unmap_page = iommu_dma_unmap_page,
    .map_sg = iommu_dma_map_sg,
    .unmap_sg = iommu_dma_unmap_sg,
    .sync_single_for_cpu = iommu_dma_sync_single_for_cpu,
    .sync_single_for_device = iommu_dma_sync_single_for_device,
    .sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu,
    .sync_sg_for_device = iommu_dma_sync_sg_for_device,
    .map_resource = iommu_dma_map_resource,
    .unmap_resource = iommu_dma_unmap_resource,
    .get_merge_boundary = iommu_dma_get_merge_boundary,
};

void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
    struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

    if (!domain) {
        goto out_err;
    }

    /*
     * The IOMMU core code allocates the default DMA domain, which the
     * underlying IOMMU driver needs to support via the dma-iommu layer.
     */
    if (domain->type == IOMMU_DOMAIN_DMA) {
        if (iommu_dma_init_domain(domain, dma_base, size, dev)) {
            goto out_err;
        }
        dev->dma_ops = &iommu_dma_ops;
    }

    return;
out_err:
    pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", dev_name(dev));
}

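/*
 * Hypothetical caller sketch (illustrative only): arch code typically calls
 * iommu_setup_dma_ops() once a device's IOMMU translation is known, e.g.
 * from an arch_setup_dma_ops() hook. On failure the device simply keeps the
 * platform DMA ops, as the warning above notes. The function name below is
 * invented for the example.
 */
#if 0
static void example_arch_setup(struct device *dev, u64 dma_base, u64 size)
{
    iommu_setup_dma_ops(dev, dma_base, size);
}
#endif
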
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, phys_addr_t msi_addr,
                                                         struct iommu_domain *domain)
{
    struct iommu_dma_cookie *cookie = domain->iova_cookie;
    struct iommu_dma_msi_page *msi_page;
    dma_addr_t iova;
    int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
    size_t size = cookie_msi_granule(cookie);

    msi_addr &= ~(phys_addr_t)(size - 1);
    list_for_each_entry(msi_page, &cookie->msi_page_list, list) {
        if (msi_page->phys == msi_addr) {
            return msi_page;
        }
    }

    msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
    if (!msi_page) {
        return NULL;
    }

    iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
    if (!iova) {
        goto out_free_page;
    }

    if (iommu_map(domain, iova, msi_addr, size, prot)) {
        goto out_free_iova;
    }

    INIT_LIST_HEAD(&msi_page->list);
    msi_page->phys = msi_addr;
    msi_page->iova = iova;
    list_add(&msi_page->list, &cookie->msi_page_list);
    return msi_page;

out_free_iova:
    iommu_dma_free_iova(cookie, iova, size);
out_free_page:
    kfree(msi_page);
    return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
    struct device *dev = msi_desc_to_dev(desc);
    struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
    struct iommu_dma_msi_page *msi_page;
    static DEFINE_MUTEX(msi_prepare_lock); /* see below */

    if (!domain || !domain->iova_cookie) {
        desc->iommu_cookie = NULL;
        return 0;
    }

    /*
     * In fact the whole prepare operation should already be serialised by
     * irq_domain_mutex further up the callchain, but that's pretty subtle
     * on its own, so consider this locking as failsafe documentation...
     */
    mutex_lock(&msi_prepare_lock);
    msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
    mutex_unlock(&msi_prepare_lock);

    msi_desc_set_iommu_cookie(desc, msi_page);

    if (!msi_page) {
        return -ENOMEM;
    }
    return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
    struct device *dev = msi_desc_to_dev(desc);
    const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
    const struct iommu_dma_msi_page *msi_page;

    msi_page = msi_desc_get_iommu_cookie(desc);
    if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) {
        return;
    }

    msg->address_hi = upper_32_bits(msi_page->iova);
    msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
    msg->address_lo += lower_32_bits(msi_page->iova);
}
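
/*
 * Worked example (illustrative): with a 4 KiB MSI granule and
 * msi_page->iova == 0xfee00000, an original doorbell address_lo of
 * 0x20001040 becomes 0xfee00040: the in-page offset (0x040) survives the
 * mask while the page bits are replaced by the IOVA; address_hi carries
 * any upper IOVA bits (zero here).
 */
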
static int iommu_dma_init(void)
{
    return iova_cache_get();
}
arch_initcall(iommu_dma_init);