// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for Rockchip
 *
 * Module Authors:	Simon Xue <xxm@rock-chips.com>
 *			Daniel Kurtz <djkurtz@chromium.org>
 */

#include <soc/rockchip/rockchip_iommu.h>

#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/** MMU register offsets */
#define RK_MMU_DTE_ADDR        0x00 /* Directory table address */
#define RK_MMU_STATUS          0x04
#define RK_MMU_COMMAND         0x08
#define RK_MMU_PAGE_FAULT_ADDR 0x0C /* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE    0x10 /* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT     0x14 /* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR       0x18 /* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK        0x1C /* IRQ enable */
#define RK_MMU_INT_STATUS      0x20 /* IRQ status after masking */
#define RK_MMU_AUTO_GATING     0x24

#define DTE_ADDR_DUMMY 0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US         100
#define RK_MMU_FORCE_RESET_TIMEOUT_US 100000
#define RK_MMU_POLL_TIMEOUT_US        1000

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED      BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE   BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE        BIT(2)
#define RK_MMU_STATUS_IDLE                BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE    BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING   0 /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING  1 /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL    2 /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL   3 /* Stop stall re-enables paging */
#define RK_MMU_CMD_ZAP_CACHE       4 /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE 5 /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET     6 /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT 0x01 /* page fault */
#define RK_MMU_IRQ_BUS_ERROR  0x02 /* bus read error */
#define RK_MMU_IRQ_MASK       (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define DISABLE_FETCH_DTE_TIME_LIMIT BIT(31)

#define CMD_RETRY_COUNT 10

/*
 * Support mapping any size that fits in one page table:
 * 4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
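/*
 * Illustrative note (not part of the original driver comments): 0x007ff000
 * has bits 12 through 22 set, so the supported mapping sizes are the powers
 * of two from 2^12 = 4 KiB up to 2^22 = 4 MiB, i.e. exactly the range that a
 * single 1024-entry page table can cover.
 */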
#define DT_LO_MASK 0xfffff000
#define DT_HI_MASK GENMASK_ULL(39, 32)
#define DT_SHIFT   28

#define DTE_BASE_HI_MASK GENMASK(11, 4)

#define PAGE_DESC_LO_MASK   0xfffff000
#define PAGE_DESC_HI1_LOWER 32
#define PAGE_DESC_HI1_UPPER 35
#define PAGE_DESC_HI2_LOWER 36
#define PAGE_DESC_HI2_UPPER 39
#define PAGE_DESC_HI_MASK1  GENMASK_ULL(PAGE_DESC_HI1_UPPER, PAGE_DESC_HI1_LOWER)
#define PAGE_DESC_HI_MASK2  GENMASK_ULL(PAGE_DESC_HI2_UPPER, PAGE_DESC_HI2_LOWER)

#define DTE_HI1_LOWER 8
#define DTE_HI1_UPPER 11
#define DTE_HI2_LOWER 4
#define DTE_HI2_UPPER 7
#define DTE_HI_MASK1  GENMASK(DTE_HI1_UPPER, DTE_HI1_LOWER)
#define DTE_HI_MASK2  GENMASK(DTE_HI2_UPPER, DTE_HI2_LOWER)

#define PAGE_DESC_HI_SHIFT1 (PAGE_DESC_HI1_LOWER - DTE_HI1_LOWER)
#define PAGE_DESC_HI_SHIFT2 (PAGE_DESC_HI2_LOWER - DTE_HI2_LOWER)

#define RK_IOMMU_VERSION_CMP 0x02
#define RK_ADDR_PHYS_MUL     4
#define RK_DMA_BIT_MASK      32

struct rk_iommu_domain {
    struct list_head iommus;
    u32 *dt; /* page directory table */
    dma_addr_t dt_dma;
    spinlock_t iommus_lock; /* lock for iommus list */
    spinlock_t dt_lock; /* lock for modifying page directory table */
    bool shootdown_entire;

    struct iommu_domain domain;
};

struct rockchip_iommu_data {
    u32 version;
};

struct rk_iommu {
    struct device *dev;
    void __iomem **bases;
    int num_mmu;
    int num_irq;
    struct clk_bulk_data *clocks;
    int num_clocks;
    bool reset_disabled;
    bool skip_read; /* rk3126/rk3128 can't read vop iommu registers */
    bool dlr_disable; /* avoid accessing the iommu when runtime PM ops are called */
    bool cmd_retry;
    struct iommu_device iommu;
    struct list_head node; /* entry in rk_iommu_domain.iommus */
    struct iommu_domain *domain; /* domain to which iommu is attached */
    struct iommu_group *group;
    u32 version;
    bool shootdown_entire;
};

struct rk_iommudata {
    struct device_link *link; /* runtime PM link from IOMMU to master */
    struct rk_iommu *iommu;
    bool defer_attach;
};

static struct device *dma_dev;

static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma, unsigned int count)
{
    size_t size = count * sizeof(u32); /* count is the number of u32 entries */

    dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
    return container_of(dom, struct rk_iommu_domain, domain);
}

/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table".
 * The second level is the 1024 Page Tables (PT).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KB page of physical memory.
 *
 * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                   DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */

/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always start on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK 0xfffff000
#define RK_DTE_PT_VALID        BIT(0)

/*
 * In v2:
 * 31:12 - PT address bit 31:0
 * 11: 8 - PT address bit 35:32
 *  7: 4 - PT address bit 39:36
 *  3: 1 - Reserved
 *     0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK_V2 0xfffffff0

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
    return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
{
    u64 dte_v2 = dte;

    dte_v2 = ((dte_v2 & DTE_HI_MASK2) << PAGE_DESC_HI_SHIFT2) | ((dte_v2 & DTE_HI_MASK1) << PAGE_DESC_HI_SHIFT1) |
             (dte_v2 & PAGE_DESC_LO_MASK);

    return (phys_addr_t)dte_v2;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
    return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
    return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte_v2(dma_addr_t pt_dma)
{
    pt_dma = (pt_dma & PAGE_DESC_LO_MASK) | ((pt_dma & PAGE_DESC_HI_MASK1) >> PAGE_DESC_HI_SHIFT1) |
             ((pt_dma & PAGE_DESC_HI_MASK2) >> PAGE_DESC_HI_SHIFT2);

    return (pt_dma & RK_DTE_PT_ADDRESS_MASK_V2) | RK_DTE_PT_VALID;
}
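/*
 * Worked example (illustrative values, not from the hardware documentation):
 * packing a 40-bit, 4 KiB aligned page table address 0xAB_1234_5000 into a
 * v2 DTE with rk_mk_dte_v2() above:
 *
 *   bits 31:12 (0x12345000) stay in place,
 *   bits 35:32 (0xB) move to DTE bits 11:8 -> 0xB00,
 *   bits 39:36 (0xA) move to DTE bits  7:4 -> 0x0A0,
 *   bit 0 is the valid bit                 -> 0x001,
 *
 * giving dte = 0x12345BA1. rk_dte_pt_address_v2(0x12345BA1) reverses the
 * packing and returns 0xAB12345000.
 */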
/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK   0x000001fe
#define RK_PTE_PAGE_WRITABLE     BIT(2)
#define RK_PTE_PAGE_READABLE     BIT(1)
#define RK_PTE_PAGE_VALID        BIT(0)

/*
 * In v2:
 * 31:12 - Page address bit 31:0
 * 11: 9 - Page address bit 34:32
 *  8: 4 - Page address bit 39:35
 *     3 - Security
 *     2 - Writable
 *     1 - Readable
 *     0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK_V2 0xfffffff0
#define RK_PTE_PAGE_FLAGS_MASK_V2   0x0000000e
#define RK_PTE_PAGE_READABLE_V2     BIT(1)
#define RK_PTE_PAGE_WRITABLE_V2     BIT(2)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
    return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline phys_addr_t rk_pte_page_address_v2(u32 pte)
{
    u64 pte_v2 = pte;

    pte_v2 = ((pte_v2 & DTE_HI_MASK2) << PAGE_DESC_HI_SHIFT2) | ((pte_v2 & DTE_HI_MASK1) << PAGE_DESC_HI_SHIFT1) |
             (pte_v2 & PAGE_DESC_LO_MASK);

    return (phys_addr_t)pte_v2;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
    return pte & RK_PTE_PAGE_VALID;
}
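/*
 * Worked example (illustrative values, not from the original comments): for a
 * 4 KiB aligned physical page at 0x12345000 mapped with
 * prot = IOMMU_READ | IOMMU_WRITE, rk_mk_pte() below produces
 *
 *   0x12345000 | RK_PTE_PAGE_READABLE | RK_PTE_PAGE_WRITABLE |
 *   RK_PTE_PAGE_VALID = 0x12345007,
 *
 * and rk_pte_page_address(0x12345007) recovers 0x12345000.
 */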
/* set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
    u32 flags = 0;
    flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
    flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
    page &= RK_PTE_PAGE_ADDRESS_MASK;
    return page | flags | RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte_v2(phys_addr_t page, int prot)
{
    u32 flags = 0;

    flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0;
    flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0;
    page = (page & PAGE_DESC_LO_MASK) | ((page & PAGE_DESC_HI_MASK1) >> PAGE_DESC_HI_SHIFT1) |
           ((page & PAGE_DESC_HI_MASK2) >> PAGE_DESC_HI_SHIFT2);
    page &= RK_PTE_PAGE_ADDRESS_MASK_V2;

    return page | flags | RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte_invalid(u32 pte)
{
    return pte & ~RK_PTE_PAGE_VALID;
}

/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
#define RK_IOVA_DTE_MASK   0xffc00000
#define RK_IOVA_DTE_SHIFT  22
#define RK_IOVA_PTE_MASK   0x003ff000
#define RK_IOVA_PTE_SHIFT  12
#define RK_IOVA_PAGE_MASK  0x00000fff
#define RK_IOVA_PAGE_SHIFT 0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
    return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
    return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
    return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}
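/*
 * Worked example (illustrative value, not from the original comments): the
 * iova 0x12345678 decomposes with the helpers above as
 *
 *   rk_iova_dte_index(0x12345678)   = 0x048 (bits 31:22)
 *   rk_iova_pte_index(0x12345678)   = 0x345 (bits 21:12)
 *   rk_iova_page_offset(0x12345678) = 0x678 (bits 11:0)
 *
 * i.e. DTE 0x48 in the DT, PTE 0x345 in that page table, and byte 0x678
 * within the 4 KiB page.
 */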
static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
    return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
    writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
    int i;

    for (i = 0; i < iommu->num_mmu; i++) {
        writel(command, iommu->bases[i] + RK_MMU_COMMAND);
    }
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
    writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start, size_t size)
{
    int i;
    dma_addr_t iova_end = iova_start + size;
    /*
     * TODO(djkurtz): Figure out when it is more efficient to shootdown the
     * entire iotlb rather than iterate over individual iovas.
     */
    for (i = 0; i < iommu->num_mmu; i++) {
        dma_addr_t iova;

        for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE) {
            rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
        }
    }
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
    bool active = true;
    int i;

    for (i = 0; i < iommu->num_mmu; i++) {
        active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE);
    }

    return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
    bool enable = true;
    int i;

    for (i = 0; i < iommu->num_mmu; i++) {
        enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & RK_MMU_STATUS_PAGING_ENABLED);
    }

    return enable;
}

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
    bool done = true;
    int i;

    for (i = 0; i < iommu->num_mmu; i++) {
        done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;
    }

    return done;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
    int ret, i;
    bool val;
    int retry_count = 0;

    if (iommu->skip_read) {
        goto read_wa;
    }

    if (rk_iommu_is_stall_active(iommu)) {
        return 0;
    }

    /* Stall can only be enabled if paging is enabled */
    if (!rk_iommu_is_paging_enabled(iommu)) {
        return 0;
    }

read_wa:
    while (1) {
        rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
        if (iommu->skip_read) {
            return 0;
        }

        ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val, val, RK_MMU_POLL_PERIOD_US,
                                 RK_MMU_POLL_TIMEOUT_US);
        if (ret) {
            for (i = 0; i < iommu->num_mmu; i++) {
                dev_err(iommu->dev, "Enable stall request timed out, retry_count = %d, status: %#08x\n", retry_count,
                        rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
            }
            if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT)) {
                continue;
            }
        }
        break;
    }
    return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
    int ret, i;
    bool val;
    int retry_count = 0;

    if (iommu->skip_read) {
        goto read_wa;
    }

    if (!rk_iommu_is_stall_active(iommu)) {
        return 0;
    }

read_wa:
    while (1) {
        rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
        if (iommu->skip_read) {
            return 0;
        }

        ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val, !val, RK_MMU_POLL_PERIOD_US,
                                 RK_MMU_POLL_TIMEOUT_US);
        if (ret) {
            for (i = 0; i < iommu->num_mmu; i++) {
                dev_err(iommu->dev, "Disable stall request timed out, retry_count = %d, status: %#08x\n", retry_count,
                        rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
            }
            if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT)) {
                continue;
            }
        }
        break;
    }
    return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
    int ret, i;
    bool val;
    int retry_count = 0;

    if (iommu->skip_read) {
        goto read_wa;
    }

    if (rk_iommu_is_paging_enabled(iommu)) {
        return 0;
    }

read_wa:
    while (1) {
        rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
        if (iommu->skip_read) {
            return 0;
        }

        ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val, val, RK_MMU_POLL_PERIOD_US,
                                 RK_MMU_POLL_TIMEOUT_US);
        if (ret) {
            for (i = 0; i < iommu->num_mmu; i++) {
                dev_err(iommu->dev, "Enable paging request timed out, retry_count = %d, status: %#08x\n", retry_count,
                        rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
            }
            if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT)) {
                continue;
            }
        }
        break;
    }
    return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
    int ret, i;
    bool val;
    int retry_count = 0;

    if (iommu->skip_read) {
        goto read_wa;
    }

    if (!rk_iommu_is_paging_enabled(iommu)) {
        return 0;
    }

read_wa:
    while (1) {
        rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
        if (iommu->skip_read) {
            return 0;
        }

        ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val, !val, RK_MMU_POLL_PERIOD_US,
                                 RK_MMU_POLL_TIMEOUT_US);
        if (ret) {
            for (i = 0; i < iommu->num_mmu; i++) {
                dev_err(iommu->dev, "Disable paging request timed out, retry_count = %d, status: %#08x\n", retry_count,
                        rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
            }
            if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT)) {
                continue;
            }
        }
        break;
    }
    return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
    int ret, i;
    u32 dte_addr;
    bool val;
    u32 address_mask;

    if (iommu->reset_disabled) {
        return 0;
    }

    if (iommu->skip_read) {
        goto read_wa;
    }

    /*
     * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
     * and verifying that the upper 5 nybbles are read back.
     *
     * In v2: the upper 7 nybbles are read back.
     */
    for (i = 0; i < iommu->num_mmu; i++) {
        rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

        if (iommu->version >= 0x2) {
            address_mask = RK_DTE_PT_ADDRESS_MASK_V2;
        } else {
            address_mask = RK_DTE_PT_ADDRESS_MASK;
        }
        dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
        if (dte_addr != (DTE_ADDR_DUMMY & address_mask)) {
            dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
            return -EFAULT;
        }
    }

read_wa:
    rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
    if (iommu->skip_read) {
        return 0;
    }

    ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val, val, RK_MMU_FORCE_RESET_TIMEOUT_US,
                             RK_MMU_POLL_TIMEOUT_US);
    if (ret) {
        dev_err(iommu->dev, "FORCE_RESET command timed out\n");
        return ret;
    }

    return 0;
}
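/*
 * Illustrative note (not in the original comments): with DTE_ADDR_DUMMY =
 * 0xCAFEBABE, the reset check above expects to read back
 *
 *   0xCAFEBABE & RK_DTE_PT_ADDRESS_MASK    = 0xCAFEB000 (v1, 5 nybbles)
 *   0xCAFEBABE & RK_DTE_PT_ADDRESS_MASK_V2 = 0xCAFEBAB0 (v2, 7 nybbles)
 *
 * because the low bits of MMU_DTE_ADDR are not part of the stored address.
 */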

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
    void __iomem *base = iommu->bases[index];
    u32 dte_index, pte_index, page_offset;
    u32 mmu_dte_addr;
    phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
    u32 *dte_addr;
    u32 dte;
    phys_addr_t pte_addr_phys = 0;
    u32 *pte_addr = NULL;
    u32 pte = 0;
    phys_addr_t page_addr_phys = 0;
    u32 page_flags = 0;

    dte_index = rk_iova_dte_index(iova);
    pte_index = rk_iova_pte_index(iova);
    page_offset = rk_iova_page_offset(iova);

    mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
    mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
    if (iommu->version >= RK_IOMMU_VERSION_CMP) {
        mmu_dte_addr_phys = (mmu_dte_addr_phys & DT_LO_MASK) | ((mmu_dte_addr_phys & DTE_BASE_HI_MASK) << DT_SHIFT);
    }

    dte_addr_phys = mmu_dte_addr_phys + (RK_ADDR_PHYS_MUL * dte_index);
    dte_addr = phys_to_virt(dte_addr_phys);
    dte = *dte_addr;

    if (!rk_dte_is_pt_valid(dte)) {
        goto print_it;
    }

    if (iommu->version >= RK_IOMMU_VERSION_CMP) {
        pte_addr_phys = rk_dte_pt_address_v2(dte) + (pte_index * RK_ADDR_PHYS_MUL);
    } else {
        pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * RK_ADDR_PHYS_MUL);
    }
    pte_addr = phys_to_virt(pte_addr_phys);
    pte = *pte_addr;

    if (!rk_pte_is_page_valid(pte)) {
        goto print_it;
    }

    if (iommu->version >= RK_IOMMU_VERSION_CMP) {
        page_addr_phys = rk_pte_page_address_v2(pte) + page_offset;
    } else {
        page_addr_phys = rk_pte_page_address(pte) + page_offset;
    }
    page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
    dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n", &iova, dte_index,
            pte_index, page_offset);
    dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
            &mmu_dte_addr_phys, &dte_addr_phys, dte, rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
            rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
    struct rk_iommu *iommu = dev_id;
    u32 status;
    u32 int_status;
    u32 int_mask;
    dma_addr_t iova;
    irqreturn_t ret = IRQ_NONE;
    int i, err;

    err = pm_runtime_get_if_in_use(iommu->dev);
    if (!err || WARN_ON_ONCE(err < 0)) {
        return ret;
    }

    if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks))) {
        goto out;
    }

    for (i = 0; i < iommu->num_mmu; i++) {
        int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
        if (int_status == 0) {
            continue;
        }

        ret = IRQ_HANDLED;
        iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

        if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
            int flags;

            status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
            flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

            dev_err(iommu->dev, "Page fault at %pad of type %s\n", &iova,
                    (flags == IOMMU_FAULT_WRITE) ? "write" : "read");

            log_iova(iommu, i, iova);

            /*
             * Report page fault to any installed handlers.
             * Ignore the return code, though, since we always zap cache
             * and clear the page fault anyway.
             */
            if (iommu->domain) {
                report_iommu_fault(iommu->domain, iommu->dev, iova, status);
            } else {
                dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
            }

            rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);

            /*
             * The master may clear the int_mask to keep the iommu from
             * re-raising the interrupt while it is mapping. In that case,
             * postpone sending the PAGE_FAULT_DONE command until mapping
             * has finished.
             */
            int_mask = rk_iommu_read(iommu->bases[i], RK_MMU_INT_MASK);
            if (int_mask != 0x0) {
                rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
            }
        }

        if (int_status & RK_MMU_IRQ_BUS_ERROR) {
            dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
        }

        if (int_status & ~RK_MMU_IRQ_MASK) {
            dev_err(iommu->dev, "unexpected int_status: %#08x\n", int_status);
        }

        rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
    }

    clk_bulk_disable(iommu->num_clocks, iommu->clocks);

out:
    pm_runtime_put(iommu->dev);
    return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
    struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
    unsigned long flags;
    phys_addr_t pt_phys, phys = 0;
    u32 dte, pte;
    u32 *page_table;

    spin_lock_irqsave(&rk_domain->dt_lock, flags);

    dte = rk_domain->dt[rk_iova_dte_index(iova)];
    if (!rk_dte_is_pt_valid(dte)) {
        goto out;
    }

    pt_phys = rk_dte_pt_address(dte);
    page_table = (u32 *)phys_to_virt(pt_phys);
    pte = page_table[rk_iova_pte_index(iova)];
    if (!rk_pte_is_page_valid(pte)) {
        goto out;
    }

    phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
    spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

    return phys;
}

static phys_addr_t rk_iommu_iova_to_phys_v2(struct iommu_domain *domain, dma_addr_t iova)
{
    struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
    unsigned long flags;
    phys_addr_t pt_phys, phys = 0;
    u32 dte, pte;
    u32 *page_table;

    spin_lock_irqsave(&rk_domain->dt_lock, flags);

    dte = rk_domain->dt[rk_iova_dte_index(iova)];
    if (!rk_dte_is_pt_valid(dte)) {
        goto out;
    }

    pt_phys = rk_dte_pt_address_v2(dte);
    page_table = (u32 *)phys_to_virt(pt_phys);
    pte = page_table[rk_iova_pte_index(iova)];
    if (!rk_pte_is_page_valid(pte)) {
        goto out;
    }

    phys = rk_pte_page_address_v2(pte) + rk_iova_page_offset(iova);
out:
    spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

    return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain, dma_addr_t iova, size_t size)
{
    struct list_head *pos;
    unsigned long flags;

    /* shootdown these iova from all iommus using this domain */
    spin_lock_irqsave(&rk_domain->iommus_lock, flags);
    list_for_each(pos, &rk_domain->iommus) {
        struct rk_iommu *iommu;
        int ret;

        iommu = list_entry(pos, struct rk_iommu, node);

        /* Only zap TLBs of IOMMUs that are powered on. */
        ret = pm_runtime_get_if_in_use(iommu->dev);
        if (WARN_ON_ONCE(ret < 0)) {
            continue;
        }
        if (ret) {
            WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
            rk_iommu_zap_lines(iommu, iova, size);
            clk_bulk_disable(iommu->num_clocks, iommu->clocks);
            pm_runtime_put(iommu->dev);
        }
    }
    spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain, dma_addr_t iova, size_t size)
{
    rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
    if (size > SPAGE_SIZE) {
        rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE, SPAGE_SIZE);
    }
}

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, dma_addr_t iova)
{
    u32 *page_table, *dte_addr;
    u32 dte_index, dte;
    phys_addr_t pt_phys;
    dma_addr_t pt_dma;

    assert_spin_locked(&rk_domain->dt_lock);

    dte_index = rk_iova_dte_index(iova);
    dte_addr = &rk_domain->dt[dte_index];
    dte = *dte_addr;
    if (rk_dte_is_pt_valid(dte)) {
        goto done;
    }

    page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
    if (!page_table) {
        return ERR_PTR(-ENOMEM);
    }

    pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
    if (dma_mapping_error(dma_dev, pt_dma)) {
        dev_err(dma_dev, "DMA mapping error while allocating page table\n");
        free_page((unsigned long)page_table);
        return ERR_PTR(-ENOMEM);
    }

    dte = rk_mk_dte(pt_dma);
    *dte_addr = dte;

    rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
    rk_table_flush(rk_domain, rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
    pt_phys = rk_dte_pt_address(dte);
    return (u32 *)phys_to_virt(pt_phys);
}

static u32 *rk_dte_get_page_table_v2(struct rk_iommu_domain *rk_domain, dma_addr_t iova)
{
    u32 *page_table, *dte_addr;
    u32 dte_index, dte;
    phys_addr_t pt_phys;
    dma_addr_t pt_dma;

    assert_spin_locked(&rk_domain->dt_lock);

    dte_index = rk_iova_dte_index(iova);
    dte_addr = &rk_domain->dt[dte_index];
    dte = *dte_addr;
    if (rk_dte_is_pt_valid(dte)) {
        goto done;
    }

    page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
    if (!page_table) {
        return ERR_PTR(-ENOMEM);
    }

    pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
    if (dma_mapping_error(dma_dev, pt_dma)) {
        dev_err(dma_dev, "DMA mapping error while allocating page table\n");
        free_page((unsigned long)page_table);
        return ERR_PTR(-ENOMEM);
    }

    dte = rk_mk_dte_v2(pt_dma);
    *dte_addr = dte;

    rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
    rk_table_flush(rk_domain, rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
    pt_phys = rk_dte_pt_address_v2(dte);
    return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, dma_addr_t pte_dma, size_t size)
{
    unsigned int pte_count;
    unsigned int pte_total = size / SPAGE_SIZE;

    assert_spin_locked(&rk_domain->dt_lock);

    for (pte_count = 0; pte_count < pte_total; pte_count++) {
        u32 pte = pte_addr[pte_count];
        if (!rk_pte_is_page_valid(pte)) {
            break;
        }

        pte_addr[pte_count] = rk_mk_pte_invalid(pte);
    }

    rk_table_flush(rk_domain, pte_dma, pte_count);

    return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, dma_addr_t pte_dma, dma_addr_t iova,
                             phys_addr_t paddr, size_t size, int prot)
{
    unsigned int pte_count;
    unsigned int pte_total = size / SPAGE_SIZE;
    phys_addr_t page_phys;

    assert_spin_locked(&rk_domain->dt_lock);

    for (pte_count = 0; pte_count < pte_total; pte_count++) {
        u32 pte = pte_addr[pte_count];

        if (rk_pte_is_page_valid(pte)) {
            goto unwind;
        }

        pte_addr[pte_count] = rk_mk_pte(paddr, prot);

        paddr += SPAGE_SIZE;
    }

    rk_table_flush(rk_domain, pte_dma, pte_total);

    /*
     * Zap the first and last iova to evict from iotlb any previously
     * mapped cachelines holding stale values for its dte and pte.
     * We only zap the first and last iova, since only they could have
     * dte or pte shared with an existing mapping.
     */
    /* Do not zap tlb cache line if shootdown_entire set */
    if (!rk_domain->shootdown_entire) {
        rk_iommu_zap_iova_first_last(rk_domain, iova, size);
    }

    return 0;
unwind:
    /* Unmap the range of iovas that we just mapped */
    rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, pte_count * SPAGE_SIZE);

    iova += pte_count * SPAGE_SIZE;
    page_phys = rk_pte_page_address(pte_addr[pte_count]);
    pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n", &iova, &page_phys, &paddr, prot);

    return -EADDRINUSE;
}

static int rk_iommu_map_iova_v2(struct rk_iommu_domain *rk_domain, u32 *pte_addr, dma_addr_t pte_dma, dma_addr_t iova,
                                phys_addr_t paddr, size_t size, int prot)
{
    unsigned int pte_count;
    unsigned int pte_total = size / SPAGE_SIZE;
    phys_addr_t page_phys;

    assert_spin_locked(&rk_domain->dt_lock);

    for (pte_count = 0; pte_count < pte_total; pte_count++) {
        u32 pte = pte_addr[pte_count];

        if (rk_pte_is_page_valid(pte)) {
            goto unwind;
        }

        pte_addr[pte_count] = rk_mk_pte_v2(paddr, prot);

        paddr += SPAGE_SIZE;
    }

    rk_table_flush(rk_domain, pte_dma, pte_total);

    /*
     * Zap the first and last iova to evict from iotlb any previously
     * mapped cachelines holding stale values for its dte and pte.
     * We only zap the first and last iova, since only they could have
     * dte or pte shared with an existing mapping.
     */
    /* Do not zap tlb cache line if shootdown_entire set */
    if (!rk_domain->shootdown_entire) {
        rk_iommu_zap_iova_first_last(rk_domain, iova, size);
    }

    return 0;
unwind:
    /* Unmap the range of iovas that we just mapped */
    rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, pte_count * SPAGE_SIZE);

    iova += pte_count * SPAGE_SIZE;
    page_phys = rk_pte_page_address_v2(pte_addr[pte_count]);
    pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n", &iova, &page_phys, &paddr, prot);

    return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, phys_addr_t paddr, size_t size, int prot,
                        gfp_t gfp)
{
    struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
    unsigned long flags;
    dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
    u32 *page_table, *pte_addr;
    u32 dte, pte_index;
    int ret;

    spin_lock_irqsave(&rk_domain->dt_lock, flags);

    /*
     * pgsize_bitmap specifies iova sizes that fit in one page table
     * (1024 4-KiB pages = 4 MiB).
     * So, size will always be 4096 <= size <= 4194304.
     * Since iommu_map() guarantees that both iova and size will be
     * aligned, we will always only be mapping from a single dte here.
     */
11073d0407baSopenharmony_ci */ 11083d0407baSopenharmony_ci page_table = rk_dte_get_page_table(rk_domain, iova); 11093d0407baSopenharmony_ci if (IS_ERR(page_table)) { 11103d0407baSopenharmony_ci spin_unlock_irqrestore(&rk_domain->dt_lock, flags); 11113d0407baSopenharmony_ci return PTR_ERR(page_table); 11123d0407baSopenharmony_ci } 11133d0407baSopenharmony_ci 11143d0407baSopenharmony_ci dte = rk_domain->dt[rk_iova_dte_index(iova)]; 11153d0407baSopenharmony_ci pte_index = rk_iova_pte_index(iova); 11163d0407baSopenharmony_ci pte_addr = &page_table[pte_index]; 11173d0407baSopenharmony_ci pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32); 11183d0407baSopenharmony_ci ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova, paddr, size, prot); 11193d0407baSopenharmony_ci 11203d0407baSopenharmony_ci spin_unlock_irqrestore(&rk_domain->dt_lock, flags); 11213d0407baSopenharmony_ci 11223d0407baSopenharmony_ci return ret; 11233d0407baSopenharmony_ci} 11243d0407baSopenharmony_ci 11253d0407baSopenharmony_cistatic int rk_iommu_map_v2(struct iommu_domain *domain, unsigned long _iova, phys_addr_t paddr, size_t size, int prot, 11263d0407baSopenharmony_ci gfp_t gfp) 11273d0407baSopenharmony_ci{ 11283d0407baSopenharmony_ci struct rk_iommu_domain *rk_domain = to_rk_domain(domain); 11293d0407baSopenharmony_ci unsigned long flags; 11303d0407baSopenharmony_ci dma_addr_t pte_dma, iova = (dma_addr_t)_iova; 11313d0407baSopenharmony_ci u32 *page_table, *pte_addr; 11323d0407baSopenharmony_ci u32 dte, pte_index; 11333d0407baSopenharmony_ci int ret; 11343d0407baSopenharmony_ci 11353d0407baSopenharmony_ci spin_lock_irqsave(&rk_domain->dt_lock, flags); 11363d0407baSopenharmony_ci 11373d0407baSopenharmony_ci /* 11383d0407baSopenharmony_ci * pgsize_bitmap specifies iova sizes that fit in one page table 11393d0407baSopenharmony_ci * (1024 4-KiB pages = 4 MiB). 11403d0407baSopenharmony_ci * So, size will always be 4096 <= size <= 4194304. 11413d0407baSopenharmony_ci * Since iommu_map() guarantees that both iova and size will be 11423d0407baSopenharmony_ci * aligned, we will always only be mapping from a single dte here. 
11433d0407baSopenharmony_ci */ 11443d0407baSopenharmony_ci page_table = rk_dte_get_page_table_v2(rk_domain, iova); 11453d0407baSopenharmony_ci if (IS_ERR(page_table)) { 11463d0407baSopenharmony_ci spin_unlock_irqrestore(&rk_domain->dt_lock, flags); 11473d0407baSopenharmony_ci return PTR_ERR(page_table); 11483d0407baSopenharmony_ci } 11493d0407baSopenharmony_ci 11503d0407baSopenharmony_ci dte = rk_domain->dt[rk_iova_dte_index(iova)]; 11513d0407baSopenharmony_ci pte_index = rk_iova_pte_index(iova); 11523d0407baSopenharmony_ci pte_addr = &page_table[pte_index]; 11533d0407baSopenharmony_ci pte_dma = rk_dte_pt_address_v2(dte) + pte_index * sizeof(u32); 11543d0407baSopenharmony_ci ret = rk_iommu_map_iova_v2(rk_domain, pte_addr, pte_dma, iova, paddr, size, prot); 11553d0407baSopenharmony_ci 11563d0407baSopenharmony_ci spin_unlock_irqrestore(&rk_domain->dt_lock, flags); 11573d0407baSopenharmony_ci 11583d0407baSopenharmony_ci return ret; 11593d0407baSopenharmony_ci} 11603d0407baSopenharmony_ci 11613d0407baSopenharmony_cistatic size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, size_t size, 11623d0407baSopenharmony_ci struct iommu_iotlb_gather *gather) 11633d0407baSopenharmony_ci{ 11643d0407baSopenharmony_ci struct rk_iommu_domain *rk_domain = to_rk_domain(domain); 11653d0407baSopenharmony_ci unsigned long flags; 11663d0407baSopenharmony_ci dma_addr_t pte_dma, iova = (dma_addr_t)_iova; 11673d0407baSopenharmony_ci phys_addr_t pt_phys; 11683d0407baSopenharmony_ci u32 dte; 11693d0407baSopenharmony_ci u32 *pte_addr; 11703d0407baSopenharmony_ci size_t unmap_size; 11713d0407baSopenharmony_ci 11723d0407baSopenharmony_ci spin_lock_irqsave(&rk_domain->dt_lock, flags); 11733d0407baSopenharmony_ci 11743d0407baSopenharmony_ci /* 11753d0407baSopenharmony_ci * pgsize_bitmap specifies iova sizes that fit in one page table 11763d0407baSopenharmony_ci * (1024 4-KiB pages = 4 MiB). 11773d0407baSopenharmony_ci * So, size will always be 4096 <= size <= 4194304. 11783d0407baSopenharmony_ci * Since iommu_unmap() guarantees that both iova and size will be 11793d0407baSopenharmony_ci * aligned, we will always only be unmapping from a single dte here. 
11803d0407baSopenharmony_ci */ 11813d0407baSopenharmony_ci dte = rk_domain->dt[rk_iova_dte_index(iova)]; 11823d0407baSopenharmony_ci /* Just return 0 if iova is unmapped */ 11833d0407baSopenharmony_ci if (!rk_dte_is_pt_valid(dte)) { 11843d0407baSopenharmony_ci spin_unlock_irqrestore(&rk_domain->dt_lock, flags); 11853d0407baSopenharmony_ci return 0; 11863d0407baSopenharmony_ci } 11873d0407baSopenharmony_ci 11883d0407baSopenharmony_ci pt_phys = rk_dte_pt_address(dte); 11893d0407baSopenharmony_ci pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova); 11903d0407baSopenharmony_ci pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32); 11913d0407baSopenharmony_ci unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size); 11923d0407baSopenharmony_ci 11933d0407baSopenharmony_ci spin_unlock_irqrestore(&rk_domain->dt_lock, flags); 11943d0407baSopenharmony_ci 11953d0407baSopenharmony_ci /* Shootdown iotlb entries for iova range that was just unmapped */ 11963d0407baSopenharmony_ci rk_iommu_zap_iova(rk_domain, iova, unmap_size); 11973d0407baSopenharmony_ci 11983d0407baSopenharmony_ci return unmap_size; 11993d0407baSopenharmony_ci} 12003d0407baSopenharmony_ci 12013d0407baSopenharmony_cistatic size_t rk_iommu_unmap_v2(struct iommu_domain *domain, unsigned long _iova, size_t size, 12023d0407baSopenharmony_ci struct iommu_iotlb_gather *gather) 12033d0407baSopenharmony_ci{ 12043d0407baSopenharmony_ci struct rk_iommu_domain *rk_domain = to_rk_domain(domain); 12053d0407baSopenharmony_ci unsigned long flags; 12063d0407baSopenharmony_ci dma_addr_t pte_dma, iova = (dma_addr_t)_iova; 12073d0407baSopenharmony_ci phys_addr_t pt_phys; 12083d0407baSopenharmony_ci u32 dte; 12093d0407baSopenharmony_ci u32 *pte_addr; 12103d0407baSopenharmony_ci size_t unmap_size; 12113d0407baSopenharmony_ci 12123d0407baSopenharmony_ci spin_lock_irqsave(&rk_domain->dt_lock, flags); 12133d0407baSopenharmony_ci 12143d0407baSopenharmony_ci /* 12153d0407baSopenharmony_ci * pgsize_bitmap specifies iova sizes that fit in one page table 12163d0407baSopenharmony_ci * (1024 4-KiB pages = 4 MiB). 12173d0407baSopenharmony_ci * So, size will always be 4096 <= size <= 4194304. 12183d0407baSopenharmony_ci * Since iommu_unmap() guarantees that both iova and size will be 12193d0407baSopenharmony_ci * aligned, we will always only be unmapping from a single dte here. 
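 * Unlike the map path, the whole just-unmapped range is zapped from the
 * IOTLB below; when shootdown_entire is set the per-range zap is skipped and
 * a full-TLB invalidation elsewhere (e.g. flush_iotlb_all) is relied on instead.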
12203d0407baSopenharmony_ci */ 12213d0407baSopenharmony_ci dte = rk_domain->dt[rk_iova_dte_index(iova)]; 12223d0407baSopenharmony_ci /* Just return 0 if iova is unmapped */ 12233d0407baSopenharmony_ci if (!rk_dte_is_pt_valid(dte)) { 12243d0407baSopenharmony_ci spin_unlock_irqrestore(&rk_domain->dt_lock, flags); 12253d0407baSopenharmony_ci return 0; 12263d0407baSopenharmony_ci } 12273d0407baSopenharmony_ci 12283d0407baSopenharmony_ci pt_phys = rk_dte_pt_address_v2(dte); 12293d0407baSopenharmony_ci pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova); 12303d0407baSopenharmony_ci pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32); 12313d0407baSopenharmony_ci unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size); 12323d0407baSopenharmony_ci 12333d0407baSopenharmony_ci spin_unlock_irqrestore(&rk_domain->dt_lock, flags); 12343d0407baSopenharmony_ci 12353d0407baSopenharmony_ci /* Shootdown iotlb entries for iova range that was just unmapped */ 12363d0407baSopenharmony_ci /* Do not zap tlb cache line if shootdown_entire set */ 12373d0407baSopenharmony_ci if (!rk_domain->shootdown_entire) { 12383d0407baSopenharmony_ci rk_iommu_zap_iova(rk_domain, iova, unmap_size); 12393d0407baSopenharmony_ci } 12403d0407baSopenharmony_ci 12413d0407baSopenharmony_ci return unmap_size; 12423d0407baSopenharmony_ci} 12433d0407baSopenharmony_ci 12443d0407baSopenharmony_cistatic void rk_iommu_flush_tlb_all(struct iommu_domain *domain) 12453d0407baSopenharmony_ci{ 12463d0407baSopenharmony_ci struct rk_iommu_domain *rk_domain = to_rk_domain(domain); 12473d0407baSopenharmony_ci struct list_head *pos; 12483d0407baSopenharmony_ci unsigned long flags; 12493d0407baSopenharmony_ci int i; 12503d0407baSopenharmony_ci 12513d0407baSopenharmony_ci spin_lock_irqsave(&rk_domain->iommus_lock, flags); 12523d0407baSopenharmony_ci list_for_each(pos, &rk_domain->iommus) 12533d0407baSopenharmony_ci { 12543d0407baSopenharmony_ci struct rk_iommu *iommu; 12553d0407baSopenharmony_ci int ret; 12563d0407baSopenharmony_ci 12573d0407baSopenharmony_ci iommu = list_entry(pos, struct rk_iommu, node); 12583d0407baSopenharmony_ci 12593d0407baSopenharmony_ci ret = pm_runtime_get_if_in_use(iommu->dev); 12603d0407baSopenharmony_ci if (WARN_ON_ONCE(ret < 0)) { 12613d0407baSopenharmony_ci continue; 12623d0407baSopenharmony_ci } 12633d0407baSopenharmony_ci if (ret) { 12643d0407baSopenharmony_ci WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)); 12653d0407baSopenharmony_ci for (i = 0; i < iommu->num_mmu; i++) { 12663d0407baSopenharmony_ci rk_iommu_write(iommu->bases[i], RK_MMU_COMMAND, RK_MMU_CMD_ZAP_CACHE); 12673d0407baSopenharmony_ci } 12683d0407baSopenharmony_ci clk_bulk_disable(iommu->num_clocks, iommu->clocks); 12693d0407baSopenharmony_ci pm_runtime_put(iommu->dev); 12703d0407baSopenharmony_ci } 12713d0407baSopenharmony_ci } 12723d0407baSopenharmony_ci spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); 12733d0407baSopenharmony_ci} 12743d0407baSopenharmony_ci 12753d0407baSopenharmony_cistatic struct rk_iommu *rk_iommu_from_dev(struct device *dev) 12763d0407baSopenharmony_ci{ 12773d0407baSopenharmony_ci struct rk_iommudata *data = dev_iommu_priv_get(dev); 12783d0407baSopenharmony_ci 12793d0407baSopenharmony_ci return data ? 
data->iommu : NULL; 12803d0407baSopenharmony_ci} 12813d0407baSopenharmony_ci 12823d0407baSopenharmony_ci/* Must be called with iommu powered on and attached */ 12833d0407baSopenharmony_cistatic void rk_iommu_disable(struct rk_iommu *iommu) 12843d0407baSopenharmony_ci{ 12853d0407baSopenharmony_ci int i; 12863d0407baSopenharmony_ci 12873d0407baSopenharmony_ci /* Ignore error while disabling, just keep going */ 12883d0407baSopenharmony_ci WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)); 12893d0407baSopenharmony_ci rk_iommu_enable_stall(iommu); 12903d0407baSopenharmony_ci rk_iommu_disable_paging(iommu); 12913d0407baSopenharmony_ci for (i = 0; i < iommu->num_mmu; i++) { 12923d0407baSopenharmony_ci rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0); 12933d0407baSopenharmony_ci rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0); 12943d0407baSopenharmony_ci } 12953d0407baSopenharmony_ci rk_iommu_disable_stall(iommu); 12963d0407baSopenharmony_ci clk_bulk_disable(iommu->num_clocks, iommu->clocks); 12973d0407baSopenharmony_ci} 12983d0407baSopenharmony_ci 12993d0407baSopenharmony_ciint rockchip_iommu_disable(struct device *dev) 13003d0407baSopenharmony_ci{ 13013d0407baSopenharmony_ci struct rk_iommu *iommu; 13023d0407baSopenharmony_ci 13033d0407baSopenharmony_ci iommu = rk_iommu_from_dev(dev); 13043d0407baSopenharmony_ci if (!iommu) { 13053d0407baSopenharmony_ci return -ENODEV; 13063d0407baSopenharmony_ci } 13073d0407baSopenharmony_ci 13083d0407baSopenharmony_ci rk_iommu_disable(iommu); 13093d0407baSopenharmony_ci 13103d0407baSopenharmony_ci return 0; 13113d0407baSopenharmony_ci} 13123d0407baSopenharmony_ciEXPORT_SYMBOL(rockchip_iommu_disable); 13133d0407baSopenharmony_ci 13143d0407baSopenharmony_ci/* Must be called with iommu powered on and attached */ 13153d0407baSopenharmony_cistatic int rk_iommu_enable(struct rk_iommu *iommu) 13163d0407baSopenharmony_ci{ 13173d0407baSopenharmony_ci struct iommu_domain *domain = iommu->domain; 13183d0407baSopenharmony_ci struct rk_iommu_domain *rk_domain = to_rk_domain(domain); 13193d0407baSopenharmony_ci int ret, i; 13203d0407baSopenharmony_ci u32 dt_v2; 13213d0407baSopenharmony_ci u32 auto_gate; 13223d0407baSopenharmony_ci 13233d0407baSopenharmony_ci ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks); 13243d0407baSopenharmony_ci if (ret) { 13253d0407baSopenharmony_ci return ret; 13263d0407baSopenharmony_ci } 13273d0407baSopenharmony_ci 13283d0407baSopenharmony_ci ret = rk_iommu_enable_stall(iommu); 13293d0407baSopenharmony_ci if (ret) { 13303d0407baSopenharmony_ci goto out_disable_clocks; 13313d0407baSopenharmony_ci } 13323d0407baSopenharmony_ci 13333d0407baSopenharmony_ci ret = rk_iommu_force_reset(iommu); 13343d0407baSopenharmony_ci if (ret) { 13353d0407baSopenharmony_ci goto out_disable_stall; 13363d0407baSopenharmony_ci } 13373d0407baSopenharmony_ci 13383d0407baSopenharmony_ci for (i = 0; i < iommu->num_mmu; i++) { 13393d0407baSopenharmony_ci if (iommu->version >= 0x2) { 13403d0407baSopenharmony_ci dt_v2 = (rk_domain->dt_dma & DT_LO_MASK) | ((rk_domain->dt_dma & DT_HI_MASK) >> DT_SHIFT); 13413d0407baSopenharmony_ci rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dt_v2); 13423d0407baSopenharmony_ci } else { 13433d0407baSopenharmony_ci rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, rk_domain->dt_dma); 13443d0407baSopenharmony_ci } 13453d0407baSopenharmony_ci rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); 13463d0407baSopenharmony_ci rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); 13473d0407baSopenharmony_ci 
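        /*
         * Illustrative example (not driver output): with dt_dma = 0x1_2345_6000 the
         * dt_v2 packing above yields 0x23456000 | (0x100000000 >> 28) = 0x23456010,
         * i.e. bits 39:32 of the DT address are folded into bits 11:4 of RK_MMU_DTE_ADDR.
         */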
13483d0407baSopenharmony_ci /* Workaround for iommu blocked, BIT(31) default to 1 */ 13493d0407baSopenharmony_ci auto_gate = rk_iommu_read(iommu->bases[i], RK_MMU_AUTO_GATING); 13503d0407baSopenharmony_ci auto_gate |= DISABLE_FETCH_DTE_TIME_LIMIT; 13513d0407baSopenharmony_ci rk_iommu_write(iommu->bases[i], RK_MMU_AUTO_GATING, auto_gate); 13523d0407baSopenharmony_ci } 13533d0407baSopenharmony_ci 13543d0407baSopenharmony_ci ret = rk_iommu_enable_paging(iommu); 13553d0407baSopenharmony_ci 13563d0407baSopenharmony_ciout_disable_stall: 13573d0407baSopenharmony_ci rk_iommu_disable_stall(iommu); 13583d0407baSopenharmony_ciout_disable_clocks: 13593d0407baSopenharmony_ci clk_bulk_disable(iommu->num_clocks, iommu->clocks); 13603d0407baSopenharmony_ci return ret; 13613d0407baSopenharmony_ci} 13623d0407baSopenharmony_ci 13633d0407baSopenharmony_ciint rockchip_iommu_enable(struct device *dev) 13643d0407baSopenharmony_ci{ 13653d0407baSopenharmony_ci struct rk_iommu *iommu; 13663d0407baSopenharmony_ci 13673d0407baSopenharmony_ci iommu = rk_iommu_from_dev(dev); 13683d0407baSopenharmony_ci if (!iommu) { 13693d0407baSopenharmony_ci return -ENODEV; 13703d0407baSopenharmony_ci } 13713d0407baSopenharmony_ci 13723d0407baSopenharmony_ci return rk_iommu_enable(iommu); 13733d0407baSopenharmony_ci} 13743d0407baSopenharmony_ciEXPORT_SYMBOL(rockchip_iommu_enable); 13753d0407baSopenharmony_ci 13763d0407baSopenharmony_cistatic void rk_iommu_detach_device(struct iommu_domain *domain, struct device *dev) 13773d0407baSopenharmony_ci{ 13783d0407baSopenharmony_ci struct rk_iommu *iommu; 13793d0407baSopenharmony_ci struct rk_iommu_domain *rk_domain = to_rk_domain(domain); 13803d0407baSopenharmony_ci unsigned long flags; 13813d0407baSopenharmony_ci int ret; 13823d0407baSopenharmony_ci 13833d0407baSopenharmony_ci /* Allow 'virtual devices' (eg drm) to detach from domain */ 13843d0407baSopenharmony_ci iommu = rk_iommu_from_dev(dev); 13853d0407baSopenharmony_ci if (!iommu) { 13863d0407baSopenharmony_ci return; 13873d0407baSopenharmony_ci } 13883d0407baSopenharmony_ci 13893d0407baSopenharmony_ci dev_dbg(dev, "Detaching from iommu domain\n"); 13903d0407baSopenharmony_ci 13913d0407baSopenharmony_ci if (!iommu->domain) { 13923d0407baSopenharmony_ci return; 13933d0407baSopenharmony_ci } 13943d0407baSopenharmony_ci 13953d0407baSopenharmony_ci iommu->domain = NULL; 13963d0407baSopenharmony_ci 13973d0407baSopenharmony_ci spin_lock_irqsave(&rk_domain->iommus_lock, flags); 13983d0407baSopenharmony_ci list_del_init(&iommu->node); 13993d0407baSopenharmony_ci spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); 14003d0407baSopenharmony_ci 14013d0407baSopenharmony_ci ret = pm_runtime_get_if_in_use(iommu->dev); 14023d0407baSopenharmony_ci WARN_ON_ONCE(ret < 0); 14033d0407baSopenharmony_ci if (ret > 0) { 14043d0407baSopenharmony_ci rk_iommu_disable(iommu); 14053d0407baSopenharmony_ci pm_runtime_put(iommu->dev); 14063d0407baSopenharmony_ci } 14073d0407baSopenharmony_ci} 14083d0407baSopenharmony_ci 14093d0407baSopenharmony_cistatic int rk_iommu_attach_device(struct iommu_domain *domain, struct device *dev) 14103d0407baSopenharmony_ci{ 14113d0407baSopenharmony_ci struct rk_iommu *iommu; 14123d0407baSopenharmony_ci struct rk_iommu_domain *rk_domain = to_rk_domain(domain); 14133d0407baSopenharmony_ci unsigned long flags; 14143d0407baSopenharmony_ci int ret; 14153d0407baSopenharmony_ci 14163d0407baSopenharmony_ci /* 14173d0407baSopenharmony_ci * Allow 'virtual devices' (e.g., drm) to attach to domain. 
14183d0407baSopenharmony_ci * Such a device does not belong to an iommu group. 14193d0407baSopenharmony_ci */ 14203d0407baSopenharmony_ci iommu = rk_iommu_from_dev(dev); 14213d0407baSopenharmony_ci if (!iommu) { 14223d0407baSopenharmony_ci return 0; 14233d0407baSopenharmony_ci } 14243d0407baSopenharmony_ci 14253d0407baSopenharmony_ci dev_dbg(dev, "Attaching to iommu domain\n"); 14263d0407baSopenharmony_ci 14273d0407baSopenharmony_ci if (iommu->domain) { 14283d0407baSopenharmony_ci rk_iommu_detach_device(iommu->domain, dev); 14293d0407baSopenharmony_ci } 14303d0407baSopenharmony_ci 14313d0407baSopenharmony_ci iommu->domain = domain; 14323d0407baSopenharmony_ci 14333d0407baSopenharmony_ci /* Attach NULL for disable iommu */ 14343d0407baSopenharmony_ci if (!domain) { 14353d0407baSopenharmony_ci return 0; 14363d0407baSopenharmony_ci } 14373d0407baSopenharmony_ci 14383d0407baSopenharmony_ci spin_lock_irqsave(&rk_domain->iommus_lock, flags); 14393d0407baSopenharmony_ci list_add_tail(&iommu->node, &rk_domain->iommus); 14403d0407baSopenharmony_ci spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); 14413d0407baSopenharmony_ci 14423d0407baSopenharmony_ci rk_domain->shootdown_entire = iommu->shootdown_entire; 14433d0407baSopenharmony_ci ret = pm_runtime_get_if_in_use(iommu->dev); 14443d0407baSopenharmony_ci if (!ret || WARN_ON_ONCE(ret < 0)) { 14453d0407baSopenharmony_ci return 0; 14463d0407baSopenharmony_ci } 14473d0407baSopenharmony_ci 14483d0407baSopenharmony_ci ret = rk_iommu_enable(iommu); 14493d0407baSopenharmony_ci if (ret) { 14503d0407baSopenharmony_ci rk_iommu_detach_device(iommu->domain, dev); 14513d0407baSopenharmony_ci } 14523d0407baSopenharmony_ci 14533d0407baSopenharmony_ci pm_runtime_put(iommu->dev); 14543d0407baSopenharmony_ci 14553d0407baSopenharmony_ci return ret; 14563d0407baSopenharmony_ci} 14573d0407baSopenharmony_ci 14583d0407baSopenharmony_cistatic struct iommu_domain *rk_iommu_domain_alloc(unsigned type) 14593d0407baSopenharmony_ci{ 14603d0407baSopenharmony_ci struct rk_iommu_domain *rk_domain; 14613d0407baSopenharmony_ci 14623d0407baSopenharmony_ci if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) { 14633d0407baSopenharmony_ci return NULL; 14643d0407baSopenharmony_ci } 14653d0407baSopenharmony_ci 14663d0407baSopenharmony_ci if (!dma_dev) { 14673d0407baSopenharmony_ci return NULL; 14683d0407baSopenharmony_ci } 14693d0407baSopenharmony_ci 14703d0407baSopenharmony_ci rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); 14713d0407baSopenharmony_ci if (!rk_domain) { 14723d0407baSopenharmony_ci return NULL; 14733d0407baSopenharmony_ci } 14743d0407baSopenharmony_ci 14753d0407baSopenharmony_ci if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(&rk_domain->domain)) { 14763d0407baSopenharmony_ci goto err_free_domain; 14773d0407baSopenharmony_ci } 14783d0407baSopenharmony_ci 14793d0407baSopenharmony_ci /* 14803d0407baSopenharmony_ci * rk32xx iommus use a 2 level pagetable. 14813d0407baSopenharmony_ci * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries. 14823d0407baSopenharmony_ci * Allocate one 4 KiB page for each table. 
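 * In total, 1024 dt entries x 1024 pt entries x 4 KiB pages covers up to
 * 4 GiB of IOVA space, matching the 32-bit aperture set below.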
14833d0407baSopenharmony_ci */ 14843d0407baSopenharmony_ci rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32); 14853d0407baSopenharmony_ci if (!rk_domain->dt) { 14863d0407baSopenharmony_ci goto err_put_cookie; 14873d0407baSopenharmony_ci } 14883d0407baSopenharmony_ci 14893d0407baSopenharmony_ci rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt, SPAGE_SIZE, DMA_TO_DEVICE); 14903d0407baSopenharmony_ci if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) { 14913d0407baSopenharmony_ci dev_err(dma_dev, "DMA map error for DT\n"); 14923d0407baSopenharmony_ci goto err_free_dt; 14933d0407baSopenharmony_ci } 14943d0407baSopenharmony_ci 14953d0407baSopenharmony_ci rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES); 14963d0407baSopenharmony_ci 14973d0407baSopenharmony_ci spin_lock_init(&rk_domain->iommus_lock); 14983d0407baSopenharmony_ci spin_lock_init(&rk_domain->dt_lock); 14993d0407baSopenharmony_ci INIT_LIST_HEAD(&rk_domain->iommus); 15003d0407baSopenharmony_ci 15013d0407baSopenharmony_ci rk_domain->domain.geometry.aperture_start = 0; 15023d0407baSopenharmony_ci rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(RK_DMA_BIT_MASK); 15033d0407baSopenharmony_ci rk_domain->domain.geometry.force_aperture = true; 15043d0407baSopenharmony_ci 15053d0407baSopenharmony_ci return &rk_domain->domain; 15063d0407baSopenharmony_ci 15073d0407baSopenharmony_cierr_free_dt: 15083d0407baSopenharmony_ci free_page((unsigned long)rk_domain->dt); 15093d0407baSopenharmony_cierr_put_cookie: 15103d0407baSopenharmony_ci if (type == IOMMU_DOMAIN_DMA) { 15113d0407baSopenharmony_ci iommu_put_dma_cookie(&rk_domain->domain); 15123d0407baSopenharmony_ci } 15133d0407baSopenharmony_cierr_free_domain: 15143d0407baSopenharmony_ci kfree(rk_domain); 15153d0407baSopenharmony_ci 15163d0407baSopenharmony_ci return NULL; 15173d0407baSopenharmony_ci} 15183d0407baSopenharmony_ci 15193d0407baSopenharmony_cistatic void rk_iommu_domain_free(struct iommu_domain *domain) 15203d0407baSopenharmony_ci{ 15213d0407baSopenharmony_ci struct rk_iommu_domain *rk_domain = to_rk_domain(domain); 15223d0407baSopenharmony_ci int i; 15233d0407baSopenharmony_ci 15243d0407baSopenharmony_ci WARN_ON(!list_empty(&rk_domain->iommus)); 15253d0407baSopenharmony_ci 15263d0407baSopenharmony_ci for (i = 0; i < NUM_DT_ENTRIES; i++) { 15273d0407baSopenharmony_ci u32 dte = rk_domain->dt[i]; 15283d0407baSopenharmony_ci if (rk_dte_is_pt_valid(dte)) { 15293d0407baSopenharmony_ci phys_addr_t pt_phys = rk_dte_pt_address(dte); 15303d0407baSopenharmony_ci u32 *page_table = phys_to_virt(pt_phys); 15313d0407baSopenharmony_ci dma_unmap_single(dma_dev, pt_phys, SPAGE_SIZE, DMA_TO_DEVICE); 15323d0407baSopenharmony_ci free_page((unsigned long)page_table); 15333d0407baSopenharmony_ci } 15343d0407baSopenharmony_ci } 15353d0407baSopenharmony_ci 15363d0407baSopenharmony_ci dma_unmap_single(dma_dev, rk_domain->dt_dma, SPAGE_SIZE, DMA_TO_DEVICE); 15373d0407baSopenharmony_ci free_page((unsigned long)rk_domain->dt); 15383d0407baSopenharmony_ci 15393d0407baSopenharmony_ci if (domain->type == IOMMU_DOMAIN_DMA) { 15403d0407baSopenharmony_ci iommu_put_dma_cookie(&rk_domain->domain); 15413d0407baSopenharmony_ci } 15423d0407baSopenharmony_ci kfree(rk_domain); 15433d0407baSopenharmony_ci} 15443d0407baSopenharmony_ci 15453d0407baSopenharmony_cistatic void rk_iommu_domain_free_v2(struct iommu_domain *domain) 15463d0407baSopenharmony_ci{ 15473d0407baSopenharmony_ci struct rk_iommu_domain *rk_domain = to_rk_domain(domain); 15483d0407baSopenharmony_ci int i; 
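    /* Same teardown as the v1 variant, but second-level table addresses are decoded with the v2 (40-bit) DTE layout. */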
15493d0407baSopenharmony_ci 15503d0407baSopenharmony_ci WARN_ON(!list_empty(&rk_domain->iommus)); 15513d0407baSopenharmony_ci 15523d0407baSopenharmony_ci for (i = 0; i < NUM_DT_ENTRIES; i++) { 15533d0407baSopenharmony_ci u32 dte = rk_domain->dt[i]; 15543d0407baSopenharmony_ci 15553d0407baSopenharmony_ci if (rk_dte_is_pt_valid(dte)) { 15563d0407baSopenharmony_ci phys_addr_t pt_phys = rk_dte_pt_address_v2(dte); 15573d0407baSopenharmony_ci u32 *page_table = phys_to_virt(pt_phys); 15583d0407baSopenharmony_ci 15593d0407baSopenharmony_ci dma_unmap_single(dma_dev, pt_phys, SPAGE_SIZE, DMA_TO_DEVICE); 15603d0407baSopenharmony_ci free_page((unsigned long)page_table); 15613d0407baSopenharmony_ci } 15623d0407baSopenharmony_ci } 15633d0407baSopenharmony_ci 15643d0407baSopenharmony_ci dma_unmap_single(dma_dev, rk_domain->dt_dma, SPAGE_SIZE, DMA_TO_DEVICE); 15653d0407baSopenharmony_ci free_page((unsigned long)rk_domain->dt); 15663d0407baSopenharmony_ci 15673d0407baSopenharmony_ci if (domain->type == IOMMU_DOMAIN_DMA) { 15683d0407baSopenharmony_ci iommu_put_dma_cookie(&rk_domain->domain); 15693d0407baSopenharmony_ci } 15703d0407baSopenharmony_ci kfree(rk_domain); 15713d0407baSopenharmony_ci} 15723d0407baSopenharmony_ci 15733d0407baSopenharmony_cistatic struct iommu_device *rk_iommu_probe_device(struct device *dev) 15743d0407baSopenharmony_ci{ 15753d0407baSopenharmony_ci struct rk_iommudata *data; 15763d0407baSopenharmony_ci struct rk_iommu *iommu; 15773d0407baSopenharmony_ci 15783d0407baSopenharmony_ci data = dev_iommu_priv_get(dev); 15793d0407baSopenharmony_ci if (!data) { 15803d0407baSopenharmony_ci return ERR_PTR(-ENODEV); 15813d0407baSopenharmony_ci } 15823d0407baSopenharmony_ci 15833d0407baSopenharmony_ci iommu = rk_iommu_from_dev(dev); 15843d0407baSopenharmony_ci 15853d0407baSopenharmony_ci data->link = device_link_add(dev, iommu->dev, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME); 15863d0407baSopenharmony_ci 15873d0407baSopenharmony_ci data->defer_attach = false; 15883d0407baSopenharmony_ci 15893d0407baSopenharmony_ci /* set max segment size for dev, needed for single chunk map */ 15903d0407baSopenharmony_ci if (!dev->dma_parms) { 15913d0407baSopenharmony_ci dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL); 15923d0407baSopenharmony_ci } 15933d0407baSopenharmony_ci if (!dev->dma_parms) { 15943d0407baSopenharmony_ci return ERR_PTR(-ENOMEM); 15953d0407baSopenharmony_ci } 15963d0407baSopenharmony_ci 15973d0407baSopenharmony_ci dma_set_max_seg_size(dev, DMA_BIT_MASK(RK_DMA_BIT_MASK)); 15983d0407baSopenharmony_ci 15993d0407baSopenharmony_ci return &iommu->iommu; 16003d0407baSopenharmony_ci} 16013d0407baSopenharmony_ci 16023d0407baSopenharmony_cistatic void rk_iommu_release_device(struct device *dev) 16033d0407baSopenharmony_ci{ 16043d0407baSopenharmony_ci struct rk_iommudata *data = dev_iommu_priv_get(dev); 16053d0407baSopenharmony_ci 16063d0407baSopenharmony_ci device_link_del(data->link); 16073d0407baSopenharmony_ci} 16083d0407baSopenharmony_ci 16093d0407baSopenharmony_cistatic struct iommu_group *rk_iommu_device_group(struct device *dev) 16103d0407baSopenharmony_ci{ 16113d0407baSopenharmony_ci struct rk_iommu *iommu; 16123d0407baSopenharmony_ci 16133d0407baSopenharmony_ci iommu = rk_iommu_from_dev(dev); 16143d0407baSopenharmony_ci 16153d0407baSopenharmony_ci return iommu_group_ref_get(iommu->group); 16163d0407baSopenharmony_ci} 16173d0407baSopenharmony_ci 16183d0407baSopenharmony_cistatic bool rk_iommu_is_attach_deferred(struct iommu_domain *domain, struct device *dev) 
16193d0407baSopenharmony_ci{ 16203d0407baSopenharmony_ci struct rk_iommudata *data = dev_iommu_priv_get(dev); 16213d0407baSopenharmony_ci 16223d0407baSopenharmony_ci return data->defer_attach; 16233d0407baSopenharmony_ci} 16243d0407baSopenharmony_ci 16253d0407baSopenharmony_cistatic int rk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) 16263d0407baSopenharmony_ci{ 16273d0407baSopenharmony_ci struct platform_device *iommu_dev; 16283d0407baSopenharmony_ci struct rk_iommudata *data; 16293d0407baSopenharmony_ci 16303d0407baSopenharmony_ci data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL); 16313d0407baSopenharmony_ci if (!data) { 16323d0407baSopenharmony_ci return -ENOMEM; 16333d0407baSopenharmony_ci } 16343d0407baSopenharmony_ci 16353d0407baSopenharmony_ci iommu_dev = of_find_device_by_node(args->np); 16363d0407baSopenharmony_ci 16373d0407baSopenharmony_ci data->iommu = platform_get_drvdata(iommu_dev); 16383d0407baSopenharmony_ci 16393d0407baSopenharmony_ci if (strstr(dev_name(dev), "vop")) { 16403d0407baSopenharmony_ci data->defer_attach = true; 16413d0407baSopenharmony_ci } 16423d0407baSopenharmony_ci 16433d0407baSopenharmony_ci dev_iommu_priv_set(dev, data); 16443d0407baSopenharmony_ci 16453d0407baSopenharmony_ci platform_device_put(iommu_dev); 16463d0407baSopenharmony_ci 16473d0407baSopenharmony_ci return 0; 16483d0407baSopenharmony_ci} 16493d0407baSopenharmony_ci 16503d0407baSopenharmony_civoid rk_iommu_mask_irq(struct device *dev) 16513d0407baSopenharmony_ci{ 16523d0407baSopenharmony_ci struct rk_iommu *iommu = rk_iommu_from_dev(dev); 16533d0407baSopenharmony_ci int i; 16543d0407baSopenharmony_ci 16553d0407baSopenharmony_ci if (!iommu) { 16563d0407baSopenharmony_ci return; 16573d0407baSopenharmony_ci } 16583d0407baSopenharmony_ci 16593d0407baSopenharmony_ci for (i = 0; i < iommu->num_mmu; i++) { 16603d0407baSopenharmony_ci rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0); 16613d0407baSopenharmony_ci } 16623d0407baSopenharmony_ci} 16633d0407baSopenharmony_ciEXPORT_SYMBOL(rk_iommu_mask_irq); 16643d0407baSopenharmony_ci 16653d0407baSopenharmony_civoid rk_iommu_unmask_irq(struct device *dev) 16663d0407baSopenharmony_ci{ 16673d0407baSopenharmony_ci struct rk_iommu *iommu = rk_iommu_from_dev(dev); 16683d0407baSopenharmony_ci int i; 16693d0407baSopenharmony_ci 16703d0407baSopenharmony_ci if (!iommu) { 16713d0407baSopenharmony_ci return; 16723d0407baSopenharmony_ci } 16733d0407baSopenharmony_ci 16743d0407baSopenharmony_ci for (i = 0; i < iommu->num_mmu; i++) { 16753d0407baSopenharmony_ci /* Need to zap tlb in case of mapping during pagefault */ 16763d0407baSopenharmony_ci rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); 16773d0407baSopenharmony_ci rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); 16783d0407baSopenharmony_ci /* Leave iommu in pagefault state until mapping finished */ 16793d0407baSopenharmony_ci rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE); 16803d0407baSopenharmony_ci } 16813d0407baSopenharmony_ci} 16823d0407baSopenharmony_ciEXPORT_SYMBOL(rk_iommu_unmask_irq); 16833d0407baSopenharmony_ci 16843d0407baSopenharmony_cistatic struct iommu_ops rk_iommu_ops = { 16853d0407baSopenharmony_ci .domain_alloc = rk_iommu_domain_alloc, 16863d0407baSopenharmony_ci .domain_free = rk_iommu_domain_free, 16873d0407baSopenharmony_ci .attach_dev = rk_iommu_attach_device, 16883d0407baSopenharmony_ci .detach_dev = rk_iommu_detach_device, 16893d0407baSopenharmony_ci .map = rk_iommu_map, 16903d0407baSopenharmony_ci .unmap = 
rk_iommu_unmap, 16913d0407baSopenharmony_ci .flush_iotlb_all = rk_iommu_flush_tlb_all, 16923d0407baSopenharmony_ci .probe_device = rk_iommu_probe_device, 16933d0407baSopenharmony_ci .release_device = rk_iommu_release_device, 16943d0407baSopenharmony_ci .iova_to_phys = rk_iommu_iova_to_phys, 16953d0407baSopenharmony_ci .is_attach_deferred = rk_iommu_is_attach_deferred, 16963d0407baSopenharmony_ci .device_group = rk_iommu_device_group, 16973d0407baSopenharmony_ci .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP, 16983d0407baSopenharmony_ci .of_xlate = rk_iommu_of_xlate, 16993d0407baSopenharmony_ci}; 17003d0407baSopenharmony_ci 17013d0407baSopenharmony_cistatic struct iommu_ops rk_iommu_ops_v2 = { 17023d0407baSopenharmony_ci .domain_alloc = rk_iommu_domain_alloc, 17033d0407baSopenharmony_ci .domain_free = rk_iommu_domain_free_v2, 17043d0407baSopenharmony_ci .attach_dev = rk_iommu_attach_device, 17053d0407baSopenharmony_ci .detach_dev = rk_iommu_detach_device, 17063d0407baSopenharmony_ci .map = rk_iommu_map_v2, 17073d0407baSopenharmony_ci .unmap = rk_iommu_unmap_v2, 17083d0407baSopenharmony_ci .flush_iotlb_all = rk_iommu_flush_tlb_all, 17093d0407baSopenharmony_ci .probe_device = rk_iommu_probe_device, 17103d0407baSopenharmony_ci .release_device = rk_iommu_release_device, 17113d0407baSopenharmony_ci .iova_to_phys = rk_iommu_iova_to_phys_v2, 17123d0407baSopenharmony_ci .is_attach_deferred = rk_iommu_is_attach_deferred, 17133d0407baSopenharmony_ci .device_group = rk_iommu_device_group, 17143d0407baSopenharmony_ci .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP, 17153d0407baSopenharmony_ci .of_xlate = rk_iommu_of_xlate, 17163d0407baSopenharmony_ci}; 17173d0407baSopenharmony_ci 17183d0407baSopenharmony_cistatic const struct rockchip_iommu_data iommu_data_v1 = { 17193d0407baSopenharmony_ci .version = 0x1, 17203d0407baSopenharmony_ci}; 17213d0407baSopenharmony_ci 17223d0407baSopenharmony_cistatic const struct rockchip_iommu_data iommu_data_v2 = { 17233d0407baSopenharmony_ci .version = 0x2, 17243d0407baSopenharmony_ci}; 17253d0407baSopenharmony_ci 17263d0407baSopenharmony_cistatic const struct of_device_id rk_iommu_dt_ids[] = { 17273d0407baSopenharmony_ci { 17283d0407baSopenharmony_ci .compatible = "rockchip,iommu", 17293d0407baSopenharmony_ci .data = &iommu_data_v1, 17303d0407baSopenharmony_ci }, 17313d0407baSopenharmony_ci { 17323d0407baSopenharmony_ci .compatible = "rockchip,iommu-v2", 17333d0407baSopenharmony_ci .data = &iommu_data_v2, 17343d0407baSopenharmony_ci }, 17353d0407baSopenharmony_ci {} 17363d0407baSopenharmony_ci}; 17373d0407baSopenharmony_ci 17383d0407baSopenharmony_cistatic int rk_iommu_probe(struct platform_device *pdev) 17393d0407baSopenharmony_ci{ 17403d0407baSopenharmony_ci struct device *dev = &pdev->dev; 17413d0407baSopenharmony_ci struct rk_iommu *iommu; 17423d0407baSopenharmony_ci struct resource *res; 17433d0407baSopenharmony_ci int num_res = pdev->num_resources; 17443d0407baSopenharmony_ci int err, i; 17453d0407baSopenharmony_ci const struct of_device_id *match; 17463d0407baSopenharmony_ci struct rockchip_iommu_data *data; 17473d0407baSopenharmony_ci 17483d0407baSopenharmony_ci iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); 17493d0407baSopenharmony_ci if (!iommu) { 17503d0407baSopenharmony_ci return -ENOMEM; 17513d0407baSopenharmony_ci } 17523d0407baSopenharmony_ci 17533d0407baSopenharmony_ci match = of_match_device(rk_iommu_dt_ids, dev); 17543d0407baSopenharmony_ci if (!match) { 17553d0407baSopenharmony_ci return -EINVAL; 17563d0407baSopenharmony_ci } 17573d0407baSopenharmony_ci 
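    /* The matched compatible supplies the hardware revision; version >= 0x2 selects the 40-bit (v2) DTE/PTE layout and the _v2 iommu_ops registered below. */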
17583d0407baSopenharmony_ci    data = (struct rockchip_iommu_data *)match->data;
17593d0407baSopenharmony_ci    iommu->version = data->version;
17603d0407baSopenharmony_ci    dev_info(dev, "version = %x\n", iommu->version);
17613d0407baSopenharmony_ci
17623d0407baSopenharmony_ci    platform_set_drvdata(pdev, iommu);
17633d0407baSopenharmony_ci    iommu->dev = dev;
17643d0407baSopenharmony_ci    iommu->num_mmu = 0;
17653d0407baSopenharmony_ci
17663d0407baSopenharmony_ci    iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases), GFP_KERNEL);
17673d0407baSopenharmony_ci    if (!iommu->bases) {
17683d0407baSopenharmony_ci        return -ENOMEM;
17693d0407baSopenharmony_ci    }
17703d0407baSopenharmony_ci
17713d0407baSopenharmony_ci    for (i = 0; i < num_res; i++) {
17723d0407baSopenharmony_ci        res = platform_get_resource(pdev, IORESOURCE_MEM, i);
17733d0407baSopenharmony_ci        if (!res) {
17743d0407baSopenharmony_ci            continue;
17753d0407baSopenharmony_ci        }
17763d0407baSopenharmony_ci        iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
17773d0407baSopenharmony_ci        if (IS_ERR(iommu->bases[i])) {
17783d0407baSopenharmony_ci            continue;
17793d0407baSopenharmony_ci        }
17803d0407baSopenharmony_ci        iommu->num_mmu++;
17813d0407baSopenharmony_ci    }
17823d0407baSopenharmony_ci    if (iommu->num_mmu == 0) {
17833d0407baSopenharmony_ci        return PTR_ERR(iommu->bases[0]);
17843d0407baSopenharmony_ci    }
17853d0407baSopenharmony_ci
17863d0407baSopenharmony_ci    iommu->num_irq = platform_irq_count(pdev);
17873d0407baSopenharmony_ci    if (iommu->num_irq < 0) {
17883d0407baSopenharmony_ci        return iommu->num_irq;
17893d0407baSopenharmony_ci    }
17903d0407baSopenharmony_ci
17913d0407baSopenharmony_ci    iommu->reset_disabled = device_property_read_bool(dev, "rockchip,disable-mmu-reset");
17923d0407baSopenharmony_ci    iommu->skip_read = device_property_read_bool(dev, "rockchip,skip-mmu-read");
17933d0407baSopenharmony_ci    iommu->dlr_disable = device_property_read_bool(dev, "rockchip,disable-device-link-resume");
17943d0407baSopenharmony_ci    iommu->shootdown_entire = device_property_read_bool(dev, "rockchip,shootdown-entire");
17953d0407baSopenharmony_ci
17963d0407baSopenharmony_ci    if (of_machine_is_compatible("rockchip,rv1126") || of_machine_is_compatible("rockchip,rv1109")) {
17973d0407baSopenharmony_ci        iommu->cmd_retry = device_property_read_bool(dev, "rockchip,enable-cmd-retry");
17983d0407baSopenharmony_ci    }
17993d0407baSopenharmony_ci
18003d0407baSopenharmony_ci    /*
18013d0407baSopenharmony_ci     * iommu clocks should be present for all new devices and devicetrees,
18023d0407baSopenharmony_ci     * but there are older devicetrees without clocks out in the wild.
18033d0407baSopenharmony_ci     * So treat clocks as optional for the time being.
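     * devm_clk_bulk_get_all() returning -ENOENT is therefore treated below as
     * "no clocks" rather than as a fatal error.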
18043d0407baSopenharmony_ci     */
18053d0407baSopenharmony_ci    err = devm_clk_bulk_get_all(dev, &iommu->clocks);
18063d0407baSopenharmony_ci    if (err == -ENOENT) {
18073d0407baSopenharmony_ci        iommu->num_clocks = 0;
18083d0407baSopenharmony_ci    } else if (err < 0) {
18093d0407baSopenharmony_ci        return err;
18103d0407baSopenharmony_ci    } else {
18113d0407baSopenharmony_ci        iommu->num_clocks = err;
18123d0407baSopenharmony_ci    }
18133d0407baSopenharmony_ci
18143d0407baSopenharmony_ci    err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
18153d0407baSopenharmony_ci    if (err) {
18163d0407baSopenharmony_ci        return err;
18173d0407baSopenharmony_ci    }
18183d0407baSopenharmony_ci
18193d0407baSopenharmony_ci    iommu->group = iommu_group_alloc();
18203d0407baSopenharmony_ci    if (IS_ERR(iommu->group)) {
18213d0407baSopenharmony_ci        err = PTR_ERR(iommu->group);
18223d0407baSopenharmony_ci        goto err_unprepare_clocks;
18233d0407baSopenharmony_ci    }
18243d0407baSopenharmony_ci
18253d0407baSopenharmony_ci    err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
18263d0407baSopenharmony_ci    if (err) {
18273d0407baSopenharmony_ci        goto err_put_group;
18283d0407baSopenharmony_ci    }
18293d0407baSopenharmony_ci
18303d0407baSopenharmony_ci    if (iommu->version >= 0x2) {
18313d0407baSopenharmony_ci        iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops_v2);
18323d0407baSopenharmony_ci    } else {
18333d0407baSopenharmony_ci        iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
18343d0407baSopenharmony_ci    }
18353d0407baSopenharmony_ci    iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);
18363d0407baSopenharmony_ci
18373d0407baSopenharmony_ci    err = iommu_device_register(&iommu->iommu);
18383d0407baSopenharmony_ci    if (err) {
18393d0407baSopenharmony_ci        goto err_remove_sysfs;
18403d0407baSopenharmony_ci    }
18413d0407baSopenharmony_ci
18423d0407baSopenharmony_ci    /*
18433d0407baSopenharmony_ci     * Use the first registered IOMMU device for the domain to use with the
18443d0407baSopenharmony_ci     * DMA API, since a domain might not physically correspond to a single
18453d0407baSopenharmony_ci     * IOMMU device.
18463d0407baSopenharmony_ci */ 18473d0407baSopenharmony_ci if (!dma_dev) { 18483d0407baSopenharmony_ci dma_dev = &pdev->dev; 18493d0407baSopenharmony_ci } 18503d0407baSopenharmony_ci 18513d0407baSopenharmony_ci if (iommu->version >= 0x2) { 18523d0407baSopenharmony_ci bus_set_iommu(&platform_bus_type, &rk_iommu_ops_v2); 18533d0407baSopenharmony_ci } else { 18543d0407baSopenharmony_ci bus_set_iommu(&platform_bus_type, &rk_iommu_ops); 18553d0407baSopenharmony_ci } 18563d0407baSopenharmony_ci 18573d0407baSopenharmony_ci pm_runtime_enable(dev); 18583d0407baSopenharmony_ci 18593d0407baSopenharmony_ci if (iommu->skip_read) { 18603d0407baSopenharmony_ci goto skip_request_irq; 18613d0407baSopenharmony_ci } 18623d0407baSopenharmony_ci 18633d0407baSopenharmony_ci for (i = 0; i < iommu->num_irq; i++) { 18643d0407baSopenharmony_ci int irq = platform_get_irq(pdev, i); 18653d0407baSopenharmony_ci if (irq < 0) { 18663d0407baSopenharmony_ci return irq; 18673d0407baSopenharmony_ci } 18683d0407baSopenharmony_ci 18693d0407baSopenharmony_ci err = devm_request_irq(iommu->dev, irq, rk_iommu_irq, IRQF_SHARED, dev_name(dev), iommu); 18703d0407baSopenharmony_ci if (err) { 18713d0407baSopenharmony_ci pm_runtime_disable(dev); 18723d0407baSopenharmony_ci goto err_remove_sysfs; 18733d0407baSopenharmony_ci } 18743d0407baSopenharmony_ci } 18753d0407baSopenharmony_ci 18763d0407baSopenharmony_ciskip_request_irq: 18773d0407baSopenharmony_ci return 0; 18783d0407baSopenharmony_cierr_remove_sysfs: 18793d0407baSopenharmony_ci iommu_device_sysfs_remove(&iommu->iommu); 18803d0407baSopenharmony_cierr_put_group: 18813d0407baSopenharmony_ci iommu_group_put(iommu->group); 18823d0407baSopenharmony_cierr_unprepare_clocks: 18833d0407baSopenharmony_ci clk_bulk_unprepare(iommu->num_clocks, iommu->clocks); 18843d0407baSopenharmony_ci return err; 18853d0407baSopenharmony_ci} 18863d0407baSopenharmony_ci 18873d0407baSopenharmony_cistatic void rk_iommu_shutdown(struct platform_device *pdev) 18883d0407baSopenharmony_ci{ 18893d0407baSopenharmony_ci struct rk_iommu *iommu = platform_get_drvdata(pdev); 18903d0407baSopenharmony_ci int i; 18913d0407baSopenharmony_ci 18923d0407baSopenharmony_ci for (i = 0; i < iommu->num_irq; i++) { 18933d0407baSopenharmony_ci int irq = platform_get_irq(pdev, i); 18943d0407baSopenharmony_ci 18953d0407baSopenharmony_ci devm_free_irq(iommu->dev, irq, iommu); 18963d0407baSopenharmony_ci } 18973d0407baSopenharmony_ci 18983d0407baSopenharmony_ci pm_runtime_force_suspend(&pdev->dev); 18993d0407baSopenharmony_ci} 19003d0407baSopenharmony_ci 19013d0407baSopenharmony_cistatic int __maybe_unused rk_iommu_suspend(struct device *dev) 19023d0407baSopenharmony_ci{ 19033d0407baSopenharmony_ci struct rk_iommu *iommu = dev_get_drvdata(dev); 19043d0407baSopenharmony_ci 19053d0407baSopenharmony_ci if (!iommu->domain) { 19063d0407baSopenharmony_ci return 0; 19073d0407baSopenharmony_ci } 19083d0407baSopenharmony_ci 19093d0407baSopenharmony_ci if (iommu->dlr_disable) { 19103d0407baSopenharmony_ci return 0; 19113d0407baSopenharmony_ci } 19123d0407baSopenharmony_ci 19133d0407baSopenharmony_ci rk_iommu_disable(iommu); 19143d0407baSopenharmony_ci return 0; 19153d0407baSopenharmony_ci} 19163d0407baSopenharmony_ci 19173d0407baSopenharmony_cistatic int __maybe_unused rk_iommu_resume(struct device *dev) 19183d0407baSopenharmony_ci{ 19193d0407baSopenharmony_ci struct rk_iommu *iommu = dev_get_drvdata(dev); 19203d0407baSopenharmony_ci 19213d0407baSopenharmony_ci if (!iommu->domain) { 19223d0407baSopenharmony_ci return 0; 19233d0407baSopenharmony_ci 
} 19243d0407baSopenharmony_ci 19253d0407baSopenharmony_ci if (iommu->dlr_disable) { 19263d0407baSopenharmony_ci return 0; 19273d0407baSopenharmony_ci } 19283d0407baSopenharmony_ci 19293d0407baSopenharmony_ci return rk_iommu_enable(iommu); 19303d0407baSopenharmony_ci} 19313d0407baSopenharmony_ci 19323d0407baSopenharmony_cistatic const struct dev_pm_ops rk_iommu_pm_ops = { 19333d0407baSopenharmony_ci SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL) 19343d0407baSopenharmony_ci SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)}; 19353d0407baSopenharmony_ci 19363d0407baSopenharmony_cistatic struct platform_driver rk_iommu_driver = { 19373d0407baSopenharmony_ci .probe = rk_iommu_probe, 19383d0407baSopenharmony_ci .shutdown = rk_iommu_shutdown, 19393d0407baSopenharmony_ci .driver = 19403d0407baSopenharmony_ci { 19413d0407baSopenharmony_ci .name = "rk_iommu", 19423d0407baSopenharmony_ci .of_match_table = rk_iommu_dt_ids, 19433d0407baSopenharmony_ci .pm = &rk_iommu_pm_ops, 19443d0407baSopenharmony_ci .suppress_bind_attrs = true, 19453d0407baSopenharmony_ci }, 19463d0407baSopenharmony_ci}; 19473d0407baSopenharmony_ci 19483d0407baSopenharmony_cistatic int __init rk_iommu_init(void) 19493d0407baSopenharmony_ci{ 19503d0407baSopenharmony_ci return platform_driver_register(&rk_iommu_driver); 19513d0407baSopenharmony_ci} 19523d0407baSopenharmony_cisubsys_initcall(rk_iommu_init); 19533d0407baSopenharmony_ci 19543d0407baSopenharmony_ciMODULE_DESCRIPTION("IOMMU API for Rockchip"); 19553d0407baSopenharmony_ciMODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>"); 19563d0407baSopenharmony_ciMODULE_ALIAS("platform:rockchip-iommu"); 19573d0407baSopenharmony_ciMODULE_LICENSE("GPL v2"); 1958
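/*
 * Usage sketch (illustrative only, not part of this driver): a master-device
 * driver that owns one of these IOMMUs can pair the exported helpers with its
 * runtime-PM callbacks and bracket fault-tolerant remapping with the IRQ mask
 * helpers. The function names below are hypothetical; only
 * rockchip_iommu_enable(), rockchip_iommu_disable(), rk_iommu_mask_irq() and
 * rk_iommu_unmask_irq() are provided by this file.
 *
 *	static int my_master_runtime_resume(struct device *dev)
 *	{
 *		return rockchip_iommu_enable(dev);	// re-program the MMU after power-up
 *	}
 *
 *	static int my_master_runtime_suspend(struct device *dev)
 *	{
 *		return rockchip_iommu_disable(dev);	// MMU state is lost with the power domain
 *	}
 *
 *	rk_iommu_mask_irq(dev);		// quiesce fault IRQs while remapping
 *	... update mappings with iommu_map()/iommu_unmap() ...
 *	rk_iommu_unmask_irq(dev);	// zaps the TLB and completes the pending fault
 */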