// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018 MediaTek Inc.

/*
 * Driver for MediaTek High-Speed DMA Controller
 *
 * Author: Sean Wang <sean.wang@mediatek.com>
 *
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "../virt-dma.h"

#define MTK_HSDMA_USEC_POLL		20
#define MTK_HSDMA_TIMEOUT_POLL		200000
#define MTK_HSDMA_DMA_BUSWIDTHS		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

/* The default number of virtual channels */
#define MTK_HSDMA_NR_VCHANS		3

/* Only one physical channel supported */
#define MTK_HSDMA_NR_MAX_PCHANS		1

/* Macros for physical descriptor (PD) manipulation */
/* The number of PDs, which must be a power of 2 */
#define MTK_DMA_SIZE			64
#define MTK_HSDMA_NEXT_DESP_IDX(x, y)	(((x) + 1) & ((y) - 1))
#define MTK_HSDMA_LAST_DESP_IDX(x, y)	(((x) - 1) & ((y) - 1))
#define MTK_HSDMA_MAX_LEN		0x3f80
#define MTK_HSDMA_ALIGN_SIZE		4
#define MTK_HSDMA_PLEN_MASK		0x3fff
#define MTK_HSDMA_DESC_PLEN(x)		(((x) & MTK_HSDMA_PLEN_MASK) << 16)
#define MTK_HSDMA_DESC_PLEN_GET(x)	(((x) >> 16) & MTK_HSDMA_PLEN_MASK)

/* Registers for underlying ring manipulation */
#define MTK_HSDMA_TX_BASE		0x0
#define MTK_HSDMA_TX_CNT		0x4
#define MTK_HSDMA_TX_CPU		0x8
#define MTK_HSDMA_TX_DMA		0xc
#define MTK_HSDMA_RX_BASE		0x100
#define MTK_HSDMA_RX_CNT		0x104
#define MTK_HSDMA_RX_CPU		0x108
#define MTK_HSDMA_RX_DMA		0x10c

/* Registers for global setup */
#define MTK_HSDMA_GLO			0x204
#define MTK_HSDMA_GLO_MULTI_DMA		BIT(10)
#define MTK_HSDMA_TX_WB_DDONE		BIT(6)
#define MTK_HSDMA_BURST_64BYTES		(0x2 << 4)
#define MTK_HSDMA_GLO_RX_BUSY		BIT(3)
#define MTK_HSDMA_GLO_RX_DMA		BIT(2)
#define MTK_HSDMA_GLO_TX_BUSY		BIT(1)
#define MTK_HSDMA_GLO_TX_DMA		BIT(0)
#define MTK_HSDMA_GLO_DMA		(MTK_HSDMA_GLO_TX_DMA |	\
					 MTK_HSDMA_GLO_RX_DMA)
#define MTK_HSDMA_GLO_BUSY		(MTK_HSDMA_GLO_RX_BUSY | \
					 MTK_HSDMA_GLO_TX_BUSY)
#define MTK_HSDMA_GLO_DEFAULT		(MTK_HSDMA_GLO_TX_DMA |	\
					 MTK_HSDMA_GLO_RX_DMA |	\
					 MTK_HSDMA_TX_WB_DDONE | \
					 MTK_HSDMA_BURST_64BYTES | \
					 MTK_HSDMA_GLO_MULTI_DMA)

/* Registers for reset */
#define MTK_HSDMA_RESET			0x208
#define MTK_HSDMA_RST_TX		BIT(0)
#define MTK_HSDMA_RST_RX		BIT(16)

/* Registers for interrupt control */
#define MTK_HSDMA_DLYINT		0x20c
#define MTK_HSDMA_RXDLY_INT_EN		BIT(15)

/* Interrupt fires when the pending descriptor count exceeds the threshold */
#define MTK_HSDMA_RXMAX_PINT(x)		(((x) & 0x7f) << 8)

/* Interrupt fires when the pending time exceeds the threshold, in 20 us units */
#define MTK_HSDMA_RXMAX_PTIME(x)	((x) & 0x7f)
#define MTK_HSDMA_DLYINT_DEFAULT	(MTK_HSDMA_RXDLY_INT_EN |	\
					 MTK_HSDMA_RXMAX_PINT(20) |	\
					 MTK_HSDMA_RXMAX_PTIME(20))
#define MTK_HSDMA_INT_STATUS		0x220
#define MTK_HSDMA_INT_ENABLE		0x228
#define MTK_HSDMA_INT_RXDONE		BIT(16)

enum mtk_hsdma_vdesc_flag {
	MTK_HSDMA_VDESC_FINISHED	= 0x01,
};

#define IS_MTK_HSDMA_VDESC_FINISHED(x) ((x) == MTK_HSDMA_VDESC_FINISHED)

/**
 * struct mtk_hsdma_pdesc - This is the struct holding info describing physical
 *			    descriptor (PD); it must be kept at 4-byte
 *			    alignment in little-endian order.
 * @desc1:		    | The control pad used to indicate to hardware how
 * @desc2:		    | to deal with the descriptor, such as the source
 * @desc3:		    | and destination address and data length. The maximum
 * @desc4:		    | data length each pdesc can handle is 0x3f80 bytes
 */
struct mtk_hsdma_pdesc {
	__le32 desc1;
	__le32 desc2;
	__le32 desc3;
	__le32 desc4;
} __packed __aligned(4);

/**
 * struct mtk_hsdma_vdesc - This is the struct holding info describing virtual
 *			    descriptor (VD)
 * @vd:			    An instance for struct virt_dma_desc
 * @len:		    The total data size the device wants to move
 * @residue:		    The remaining data size the device will move
 * @dest:		    The destination address the device wants to move to
 * @src:		    The source address the device wants to move from
 */
struct mtk_hsdma_vdesc {
	struct virt_dma_desc vd;
	size_t len;
	size_t residue;
	dma_addr_t dest;
	dma_addr_t src;
};

/**
 * struct mtk_hsdma_cb - This is the struct holding extra info required for RX
 *			 ring to know what relevant VD the PD is being
 *			 mapped to.
 * @vd:			 Pointer to the relevant VD.
 * @flag:		 Flag indicating what action should be taken when VD
 *			 is completed.
 */
struct mtk_hsdma_cb {
	struct virt_dma_desc *vd;
	enum mtk_hsdma_vdesc_flag flag;
};

/**
 * struct mtk_hsdma_ring - This struct holds info describing underlying ring
 *			   space
 * @txd:		   The descriptor TX ring which describes DMA source
 *			   information
 * @rxd:		   The descriptor RX ring which describes DMA
 *			   destination information
 * @cb:			   The extra information pointed at by RX ring
 * @tphys:		   The physical addr of TX ring
 * @rphys:		   The physical addr of RX ring
 * @cur_tptr:		   Pointer to the next free descriptor used by the host
 * @cur_rptr:		   Pointer to the last done descriptor by the device
 */
struct mtk_hsdma_ring {
	struct mtk_hsdma_pdesc *txd;
	struct mtk_hsdma_pdesc *rxd;
	struct mtk_hsdma_cb *cb;
	dma_addr_t tphys;
	dma_addr_t rphys;
	u16 cur_tptr;
	u16 cur_rptr;
};

/**
 * struct mtk_hsdma_pchan - This is the struct holding info describing physical
 *			    channel (PC)
 * @ring:		    An instance for the underlying ring
 * @sz_ring:		    Total size allocated for the ring
 * @nr_free:		    Total number of free rooms in the ring. It would
 *			    be accessed and updated frequently between IRQ
 *			    context and user context to reflect whether ring
 *			    can accept requests from VD.
 */
struct mtk_hsdma_pchan {
	struct mtk_hsdma_ring ring;
	size_t sz_ring;
	atomic_t nr_free;
};

/**
 * struct mtk_hsdma_vchan - This is the struct holding info describing virtual
 *			    channel (VC)
 * @vc:			    An instance for struct virt_dma_chan
 * @issue_completion:	    The wait for all issued descriptors to be completed
 * @issue_synchronize:	    Bool indicating channel synchronization starts
 * @desc_hw_processing:	    List of descriptors the hardware is processing,
 *			    which is protected by vc.lock
 */
struct mtk_hsdma_vchan {
	struct virt_dma_chan vc;
	struct completion issue_completion;
	bool issue_synchronize;
	struct list_head desc_hw_processing;
};

/**
 * struct mtk_hsdma_soc - This is the struct holding differences among SoCs
 * @ddone:		  Bit mask for DDONE
 * @ls0:		  Bit mask for LS0
 */
struct mtk_hsdma_soc {
	__le32 ddone;
	__le32 ls0;
};

/**
 * struct mtk_hsdma_device - This is the struct holding info describing HSDMA
 *			     device
 * @ddev:		     An instance for struct dma_device
 * @base:		     The mapped register I/O base
 * @clk:		     The clock the device internally uses
 * @irq:		     The IRQ the device is using
 * @dma_requests:	     The number of VCs the device supports
 * @vc:			     The pointer to all available VCs
 * @pc:			     The pointer to the underlying PC
 * @pc_refcnt:		     Track how many VCs are using the PC
 * @lock:		     Lock protecting against multiple VCs accessing PC
 * @soc:		     The pointer to area holding differences among
 *			     various platforms
 */
struct mtk_hsdma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	u32 irq;

	u32 dma_requests;
	struct mtk_hsdma_vchan *vc;
	struct mtk_hsdma_pchan *pc;
	refcount_t pc_refcnt;

	/* Lock used to protect against multiple VCs accessing the PC */
	spinlock_t lock;

	const struct mtk_hsdma_soc *soc;
};

static struct mtk_hsdma_device *to_hsdma_dev(struct dma_chan *chan)
{
	return container_of(chan->device, struct mtk_hsdma_device, ddev);
}

static inline struct mtk_hsdma_vchan *to_hsdma_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct mtk_hsdma_vchan, vc.chan);
}

static struct mtk_hsdma_vdesc *to_hsdma_vdesc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct mtk_hsdma_vdesc, vd);
}

static struct device *hsdma2dev(struct mtk_hsdma_device *hsdma)
{
	return hsdma->ddev.dev;
}

static u32 mtk_dma_read(struct mtk_hsdma_device *hsdma, u32 reg)
{
	return readl(hsdma->base + reg);
}

static void mtk_dma_write(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
{
	writel(val, hsdma->base + reg);
}

static void mtk_dma_rmw(struct mtk_hsdma_device *hsdma, u32 reg,
			u32 mask, u32 set)
{
	u32 val;

	val = mtk_dma_read(hsdma, reg);
	val &= ~mask;
	val |= set;
	mtk_dma_write(hsdma, reg, val);
}

static void mtk_dma_set(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
{
	mtk_dma_rmw(hsdma, reg, 0, val);
}

static void mtk_dma_clr(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
{
	mtk_dma_rmw(hsdma, reg, val, 0);
}

static void mtk_hsdma_vdesc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct mtk_hsdma_vdesc, vd));
}

static int mtk_hsdma_busy_wait(struct mtk_hsdma_device *hsdma)
{
	u32 status = 0;

	return readl_poll_timeout(hsdma->base + MTK_HSDMA_GLO, status,
				  !(status & MTK_HSDMA_GLO_BUSY),
				  MTK_HSDMA_USEC_POLL,
				  MTK_HSDMA_TIMEOUT_POLL);
}

static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma,
				 struct mtk_hsdma_pchan *pc)
{
	struct mtk_hsdma_ring *ring = &pc->ring;
	int err;

	memset(pc, 0, sizeof(*pc));

	/*
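	 * The TX ring holds source descriptors and the RX ring holds the
	 * matching destination descriptors; entry i in each ring describes
	 * the same copy.
	 *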
	 * Allocate ring space where [0 ... MTK_DMA_SIZE - 1] is for TX ring
	 * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring.
	 */
	pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
	ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
				       &ring->tphys, GFP_NOWAIT);
	if (!ring->txd)
		return -ENOMEM;

	ring->rxd = &ring->txd[MTK_DMA_SIZE];
	ring->rphys = ring->tphys + MTK_DMA_SIZE * sizeof(*ring->txd);
	ring->cur_tptr = 0;
	ring->cur_rptr = MTK_DMA_SIZE - 1;

	ring->cb = kcalloc(MTK_DMA_SIZE, sizeof(*ring->cb), GFP_NOWAIT);
	if (!ring->cb) {
		err = -ENOMEM;
		goto err_free_dma;
	}

	atomic_set(&pc->nr_free, MTK_DMA_SIZE - 1);

	/* Disable HSDMA and wait for the completion */
	mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
	err = mtk_hsdma_busy_wait(hsdma);
	if (err)
		goto err_free_cb;

	/* Reset */
	mtk_dma_set(hsdma, MTK_HSDMA_RESET,
		    MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX);
	mtk_dma_clr(hsdma, MTK_HSDMA_RESET,
		    MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX);

	/* Setup HSDMA initial pointer in the ring */
	mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, ring->tphys);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, MTK_DMA_SIZE);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_DMA, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, ring->rphys);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, MTK_DMA_SIZE);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, ring->cur_rptr);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_DMA, 0);

	/* Enable HSDMA */
	mtk_dma_set(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);

	/* Setup delayed interrupt */
	mtk_dma_write(hsdma, MTK_HSDMA_DLYINT, MTK_HSDMA_DLYINT_DEFAULT);

	/* Enable interrupt */
	mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);

	return 0;

err_free_cb:
	kfree(ring->cb);

err_free_dma:
	dma_free_coherent(hsdma2dev(hsdma),
			  pc->sz_ring, ring->txd, ring->tphys);
	return err;
}

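/*
 * For reference, the sizes implied by the definitions above: each PD is four
 * __le32 words (16 bytes), so with MTK_DMA_SIZE = 64 the coherent allocation
 * made in mtk_hsdma_alloc_pchan() is
 *
 *	sz_ring = 2 * 64 * 16 = 2048 bytes
 *
 * where the first half backs the TX ring and the second half (rxd/rphys)
 * backs the RX ring.
 */
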
static void mtk_hsdma_free_pchan(struct mtk_hsdma_device *hsdma,
				 struct mtk_hsdma_pchan *pc)
{
	struct mtk_hsdma_ring *ring = &pc->ring;

	/* Disable HSDMA and then wait for the completion */
	mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
	mtk_hsdma_busy_wait(hsdma);

	/* Reset pointer in the ring */
	mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, MTK_DMA_SIZE - 1);

	kfree(ring->cb);

	dma_free_coherent(hsdma2dev(hsdma),
			  pc->sz_ring, ring->txd, ring->tphys);
}

static int mtk_hsdma_issue_pending_vdesc(struct mtk_hsdma_device *hsdma,
					 struct mtk_hsdma_pchan *pc,
					 struct mtk_hsdma_vdesc *hvd)
{
	struct mtk_hsdma_ring *ring = &pc->ring;
	struct mtk_hsdma_pdesc *txd, *rxd;
	u16 reserved, prev, tlen, num_sgs;
	unsigned long flags;

	/* Protect against the PC being accessed by multiple VCs simultaneously */
	spin_lock_irqsave(&hsdma->lock, flags);

	/*
	 * Reserve rooms, where pc->nr_free is used to track how many free
	 * rooms are left in the ring; it is updated in both user and IRQ
	 * context.
	 */
	num_sgs = DIV_ROUND_UP(hvd->len, MTK_HSDMA_MAX_LEN);
	reserved = min_t(u16, num_sgs, atomic_read(&pc->nr_free));

	if (!reserved) {
		spin_unlock_irqrestore(&hsdma->lock, flags);
		return -ENOSPC;
	}

	atomic_sub(reserved, &pc->nr_free);

	while (reserved--) {
		/* Limit size by PD capability for valid data moving */
		tlen = (hvd->len > MTK_HSDMA_MAX_LEN) ?
		       MTK_HSDMA_MAX_LEN : hvd->len;

		/*
		 * Setup PDs using the remaining VD info mapped onto those
		 * reserved rooms. Since the RXD is shared memory between the
		 * host and the device, allocated by the dma_alloc_coherent
		 * call, the helper macro WRITE_ONCE ensures the data written
		 * to RAM really happens.
		 */
		txd = &ring->txd[ring->cur_tptr];
		WRITE_ONCE(txd->desc1, hvd->src);
		WRITE_ONCE(txd->desc2,
			   hsdma->soc->ls0 | MTK_HSDMA_DESC_PLEN(tlen));

		rxd = &ring->rxd[ring->cur_tptr];
		WRITE_ONCE(rxd->desc1, hvd->dest);
		WRITE_ONCE(rxd->desc2, MTK_HSDMA_DESC_PLEN(tlen));

		/* Associate the VD this PD belongs to */
		ring->cb[ring->cur_tptr].vd = &hvd->vd;

		/* Move forward the pointer of TX ring */
		ring->cur_tptr = MTK_HSDMA_NEXT_DESP_IDX(ring->cur_tptr,
							 MTK_DMA_SIZE);

		/* Update VD with remaining data */
		hvd->src += tlen;
		hvd->dest += tlen;
		hvd->len -= tlen;
	}

	/*
	 * Tag the flag on the last PD of the VD; that PD will be responsible
	 * for completing the VD.
	 */
	if (!hvd->len) {
		prev = MTK_HSDMA_LAST_DESP_IDX(ring->cur_tptr, MTK_DMA_SIZE);
		ring->cb[prev].flag = MTK_HSDMA_VDESC_FINISHED;
	}

	/* Ensure all changes are indeed done before we go on */
	wmb();

	/*
	 * Updating the TX ring pointer in hardware lets the HSDMA take
	 * action on those pending PDs.
	 */
	mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);

	spin_unlock_irqrestore(&hsdma->lock, flags);

	return 0;
}

static void mtk_hsdma_issue_vchan_pending(struct mtk_hsdma_device *hsdma,
					  struct mtk_hsdma_vchan *hvc)
{
	struct virt_dma_desc *vd, *vd2;
	int err;

	lockdep_assert_held(&hvc->vc.lock);

	list_for_each_entry_safe(vd, vd2, &hvc->vc.desc_issued, node) {
		struct mtk_hsdma_vdesc *hvd;

		hvd = to_hsdma_vdesc(vd);

		/* Map the VD onto the PC; all VCs share a single PC */
		err = mtk_hsdma_issue_pending_vdesc(hsdma, hsdma->pc, hvd);

		/*
		 * Move the VD from desc_issued to desc_hw_processing when the
		 * entire VD fits into the available PDs. Otherwise, the
		 * uncompleted VDs stay in the desc_issued list and processing
		 * restarts as soon as possible once underlying ring space
		 * gets freed.
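		 * (mtk_hsdma_free_rooms_in_ring() calls back into this
		 * function from the ISR for exactly that purpose.)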
		 */
		if (err == -ENOSPC || hvd->len > 0)
			break;

		/*
		 * The extra list desc_hw_processing is used because the
		 * hardware can't provide sufficient information allowing us
		 * to know what VDs are still working on the underlying ring.
		 * The additional list helps us implement terminate_all,
		 * residue calculation and anything else that needs to know
		 * the detailed descriptor status on the hardware.
		 */
		list_move_tail(&vd->node, &hvc->desc_hw_processing);
	}
}

static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma)
{
	struct mtk_hsdma_vchan *hvc;
	struct mtk_hsdma_pdesc *rxd;
	struct mtk_hsdma_vdesc *hvd;
	struct mtk_hsdma_pchan *pc;
	struct mtk_hsdma_cb *cb;
	int i = MTK_DMA_SIZE;
	__le32 desc2;
	u32 status;
	u16 next;

	/* Read IRQ status */
	status = mtk_dma_read(hsdma, MTK_HSDMA_INT_STATUS);
	if (unlikely(!(status & MTK_HSDMA_INT_RXDONE)))
		goto rx_done;

	pc = hsdma->pc;

	/*
	 * Use a fail-safe loop with at most MTK_DMA_SIZE iterations to
	 * reclaim the finished descriptors: the maximum number of PDs the ISR
	 * can handle at one time shouldn't exceed MTK_DMA_SIZE, so we take it
	 * as the loop limit instead of using a dangerous infinite poll.
	 */
	while (i--) {
		next = MTK_HSDMA_NEXT_DESP_IDX(pc->ring.cur_rptr,
					       MTK_DMA_SIZE);
		rxd = &pc->ring.rxd[next];

		/*
		 * If MTK_HSDMA_DESC_DDONE is not set, the data move for this
		 * PD is still ongoing.
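		 * (The exact bit position of the done flag comes from the
		 * per-SoC soc->ddone mask.)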
		 */
		desc2 = READ_ONCE(rxd->desc2);
		if (!(desc2 & hsdma->soc->ddone))
			break;

		cb = &pc->ring.cb[next];
		if (unlikely(!cb->vd)) {
			dev_err(hsdma2dev(hsdma), "cb->vd cannot be null\n");
			break;
		}

		/* Update the residue of the VD the associated PD belongs to */
		hvd = to_hsdma_vdesc(cb->vd);
		hvd->residue -= MTK_HSDMA_DESC_PLEN_GET(rxd->desc2);

		/* Only complete the VD once its last relevant PD is finished */
		if (IS_MTK_HSDMA_VDESC_FINISHED(cb->flag)) {
			hvc = to_hsdma_vchan(cb->vd->tx.chan);

			spin_lock(&hvc->vc.lock);

			/* Remove VD from list desc_hw_processing */
			list_del(&cb->vd->node);

			/* Add VD into list desc_completed */
			vchan_cookie_complete(cb->vd);

			if (hvc->issue_synchronize &&
			    list_empty(&hvc->desc_hw_processing)) {
				complete(&hvc->issue_completion);
				hvc->issue_synchronize = false;
			}
			spin_unlock(&hvc->vc.lock);

			cb->flag = 0;
		}

		cb->vd = NULL;

		/*
		 * Recycle the RXD with the helper WRITE_ONCE so that the data
		 * written into RAM really happens.
		 */
		WRITE_ONCE(rxd->desc1, 0);
		WRITE_ONCE(rxd->desc2, 0);
		pc->ring.cur_rptr = next;

		/* Release rooms */
		atomic_inc(&pc->nr_free);
	}

	/* Ensure all changes are indeed done before we go on */
	wmb();

	/* Update CPU pointer for those completed PDs */
	mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, pc->ring.cur_rptr);

	/*
	 * Acking the pending IRQ lets the hardware stop keeping the IRQ line
	 * in a certain trigger state once software has handled all the
	 * finished physical descriptors.
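	 * (The ack below is only written once every outstanding PD has been
	 * reclaimed, i.e. nr_free is back to MTK_DMA_SIZE - 1.)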
	 */
	if (atomic_read(&pc->nr_free) >= MTK_DMA_SIZE - 1)
		mtk_dma_write(hsdma, MTK_HSDMA_INT_STATUS, status);

	/* Handle pending VDs in all VCs ASAP after freeing some rooms */
	for (i = 0; i < hsdma->dma_requests; i++) {
		hvc = &hsdma->vc[i];
		spin_lock(&hvc->vc.lock);
		mtk_hsdma_issue_vchan_pending(hsdma, hvc);
		spin_unlock(&hvc->vc.lock);
	}

rx_done:
	/* All completed PDs are cleaned up, so enable interrupt again */
	mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
}

static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
{
	struct mtk_hsdma_device *hsdma = devid;

	/*
	 * Disable interrupt until all completed PDs are cleaned up in
	 * mtk_hsdma_free_rooms call.
	 */
	mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);

	mtk_hsdma_free_rooms_in_ring(hsdma);

	return IRQ_HANDLED;
}

static struct virt_dma_desc *mtk_hsdma_find_active_desc(struct dma_chan *c,
							dma_cookie_t cookie)
{
	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &hvc->desc_hw_processing, node)
		if (vd->tx.cookie == cookie)
			return vd;

	list_for_each_entry(vd, &hvc->vc.desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}

static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
	struct mtk_hsdma_vdesc *hvd;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&hvc->vc.lock, flags);
	vd = mtk_hsdma_find_active_desc(c, cookie);
	spin_unlock_irqrestore(&hvc->vc.lock, flags);

	if (vd) {
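		/*
		 * The cookie is still queued or in flight; report the bytes
		 * left using the per-VD residue kept up to date by the ISR.
		 */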
		hvd = to_hsdma_vdesc(vd);
		bytes = hvd->residue;
	}

	dma_set_residue(txstate, bytes);

	return ret;
}

static void mtk_hsdma_issue_pending(struct dma_chan *c)
{
	struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
	unsigned long flags;

	spin_lock_irqsave(&hvc->vc.lock, flags);

	if (vchan_issue_pending(&hvc->vc))
		mtk_hsdma_issue_vchan_pending(hsdma, hvc);

	spin_unlock_irqrestore(&hvc->vc.lock, flags);
}

static struct dma_async_tx_descriptor *
mtk_hsdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct mtk_hsdma_vdesc *hvd;

	hvd = kzalloc(sizeof(*hvd), GFP_NOWAIT);
	if (!hvd)
		return NULL;

	hvd->len = len;
	hvd->residue = len;
	hvd->src = src;
	hvd->dest = dest;

	return vchan_tx_prep(to_virt_chan(c), &hvd->vd, flags);
}

static int mtk_hsdma_free_inactive_desc(struct dma_chan *c)
{
	struct virt_dma_chan *vc = to_virt_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	list_splice_tail_init(&vc->desc_allocated, &head);
	list_splice_tail_init(&vc->desc_submitted, &head);
	list_splice_tail_init(&vc->desc_issued, &head);
	spin_unlock_irqrestore(&vc->lock, flags);

	/* At this point, we don't expect users to put descriptors into the VC again */
	vchan_dma_desc_free_list(vc, &head);

	return 0;
}

static void mtk_hsdma_free_active_desc(struct dma_chan *c)
{
	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
	bool sync_needed = false;

	/*
	 * Once issue_synchronize is set, it means that once the hardware
	 * consumes all descriptors for the channel in the ring, the
	 * synchronization waiter must be notified as soon as that happens.
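	 * (The ISR signals issue_completion once desc_hw_processing drains.)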
	 */
	spin_lock(&hvc->vc.lock);
	if (!list_empty(&hvc->desc_hw_processing)) {
		hvc->issue_synchronize = true;
		sync_needed = true;
	}
	spin_unlock(&hvc->vc.lock);

	if (sync_needed)
		wait_for_completion(&hvc->issue_completion);
	/*
	 * At this point, we expect that all remaining descriptors in the ring
	 * for the channel have been processed.
	 */
	WARN_ONCE(!list_empty(&hvc->desc_hw_processing),
		  "Desc pending still in list desc_hw_processing\n");

	/* Free all descriptors in list desc_completed */
	vchan_synchronize(&hvc->vc);

	WARN_ONCE(!list_empty(&hvc->vc.desc_completed),
		  "Desc pending still in list desc_completed\n");
}

static int mtk_hsdma_terminate_all(struct dma_chan *c)
{
	/*
	 * Free pending descriptors not processed yet by hardware that have
	 * previously been submitted to the channel.
	 */
	mtk_hsdma_free_inactive_desc(c);

	/*
	 * However, the DMA engine doesn't provide any way to stop the
	 * descriptors currently being processed by hardware. The only way is
	 * to just wait until those descriptors are all completely processed,
	 * through the mtk_hsdma_free_active_desc call.
	 */
	mtk_hsdma_free_active_desc(c);

	return 0;
}

static int mtk_hsdma_alloc_chan_resources(struct dma_chan *c)
{
	struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
	int err;

	/*
	 * Since HSDMA has only one PC, the resource for the PC is allocated
	 * when the first VC is created and the other VCs run on the same PC.
	 */
	if (!refcount_read(&hsdma->pc_refcnt)) {
		err = mtk_hsdma_alloc_pchan(hsdma, hsdma->pc);
		if (err)
			return err;
		/*
		 * refcount_inc would complain about incrementing from 0
		 * (a potential use-after-free), so we need to explicitly
		 * set it to 1 initially.
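		 * (mtk_hsdma_free_chan_resources() drops the count and frees
		 * the PC again once the last VC goes away.)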
		 */
		refcount_set(&hsdma->pc_refcnt, 1);
	} else {
		refcount_inc(&hsdma->pc_refcnt);
	}

	return 0;
}

static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
{
	struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);

	/* Free all descriptors in all lists on the VC */
	mtk_hsdma_terminate_all(c);

	/* The resource for PC is not freed until all the VCs are destroyed */
	if (!refcount_dec_and_test(&hsdma->pc_refcnt))
		return;

	mtk_hsdma_free_pchan(hsdma, hsdma->pc);
}

static int mtk_hsdma_hw_init(struct mtk_hsdma_device *hsdma)
{
	int err;

	pm_runtime_enable(hsdma2dev(hsdma));
	pm_runtime_get_sync(hsdma2dev(hsdma));

	err = clk_prepare_enable(hsdma->clk);
	if (err)
		return err;

	mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DEFAULT);

	return 0;
}

static int mtk_hsdma_hw_deinit(struct mtk_hsdma_device *hsdma)
{
	mtk_dma_write(hsdma, MTK_HSDMA_GLO, 0);

	clk_disable_unprepare(hsdma->clk);

	pm_runtime_put_sync(hsdma2dev(hsdma));
	pm_runtime_disable(hsdma2dev(hsdma));

	return 0;
}

static const struct mtk_hsdma_soc mt7623_soc = {
	.ddone = BIT(31),
	.ls0 = BIT(30),
};

static const struct mtk_hsdma_soc mt7622_soc = {
	.ddone = BIT(15),
	.ls0 = BIT(14),
};

static const struct of_device_id mtk_hsdma_match[] = {
	{ .compatible = "mediatek,mt7623-hsdma", .data = &mt7623_soc},
	{ .compatible = "mediatek,mt7622-hsdma", .data = &mt7622_soc},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_hsdma_match);

static int mtk_hsdma_probe(struct platform_device *pdev)
{
	struct mtk_hsdma_device *hsdma;
	struct mtk_hsdma_vchan *vc;
	struct dma_device *dd;
	struct resource *res;
	int i, err;

	hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
	if (!hsdma)
		return -ENOMEM;

	dd = &hsdma->ddev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hsdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(hsdma->base))
		return PTR_ERR(hsdma->base);

	hsdma->soc = of_device_get_match_data(&pdev->dev);
	if (!hsdma->soc) {
		dev_err(&pdev->dev, "No device match found\n");
		return -ENODEV;
	}

	hsdma->clk = devm_clk_get(&pdev->dev, "hsdma");
	if (IS_ERR(hsdma->clk)) {
		dev_err(&pdev->dev, "No clock for %s\n",
			dev_name(&pdev->dev));
		return PTR_ERR(hsdma->clk);
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev, "No irq resource for %s\n",
			dev_name(&pdev->dev));
		return -EINVAL;
	}
	hsdma->irq = res->start;

	refcount_set(&hsdma->pc_refcnt, 0);
	spin_lock_init(&hsdma->lock);

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);

	dd->copy_align = MTK_HSDMA_ALIGN_SIZE;
	dd->device_alloc_chan_resources = mtk_hsdma_alloc_chan_resources;
	dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
	dd->device_tx_status = mtk_hsdma_tx_status;
	dd->device_issue_pending = mtk_hsdma_issue_pending;
	dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
	dd->device_terminate_all = mtk_hsdma_terminate_all;
	dd->src_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS;
	dd->dst_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_MEM_TO_MEM);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	hsdma->dma_requests = MTK_HSDMA_NR_VCHANS;
	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-requests",
						      &hsdma->dma_requests)) {
		dev_info(&pdev->dev,
			 "Using %u as missing dma-requests property\n",
			 MTK_HSDMA_NR_VCHANS);
	}

	hsdma->pc = devm_kcalloc(&pdev->dev, MTK_HSDMA_NR_MAX_PCHANS,
				 sizeof(*hsdma->pc), GFP_KERNEL);
	if (!hsdma->pc)
		return -ENOMEM;

	hsdma->vc = devm_kcalloc(&pdev->dev, hsdma->dma_requests,
				 sizeof(*hsdma->vc), GFP_KERNEL);
	if (!hsdma->vc)
		return -ENOMEM;

	for (i = 0; i < hsdma->dma_requests; i++) {
		vc = &hsdma->vc[i];
		vc->vc.desc_free = mtk_hsdma_vdesc_free;
		vchan_init(&vc->vc, dd);
		init_completion(&vc->issue_completion);
		INIT_LIST_HEAD(&vc->desc_hw_processing);
	}

	err = dma_async_device_register(dd);
	if (err)
		return err;

	err = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, hsdma);
	if (err) {
		dev_err(&pdev->dev,
			"MediaTek HSDMA OF registration failed %d\n", err);
		goto err_unregister;
	}

	mtk_hsdma_hw_init(hsdma);

	err = devm_request_irq(&pdev->dev, hsdma->irq,
			       mtk_hsdma_irq, 0,
			       dev_name(&pdev->dev), hsdma);
	if (err) {
		dev_err(&pdev->dev,
			"request_irq failed with err %d\n", err);
		goto err_free;
	}

	platform_set_drvdata(pdev, hsdma);

	dev_info(&pdev->dev, "MediaTek HSDMA driver registered\n");

	return 0;

err_free:
	mtk_hsdma_hw_deinit(hsdma);
	of_dma_controller_free(pdev->dev.of_node);
err_unregister:
	dma_async_device_unregister(dd);

	return err;
}

static int mtk_hsdma_remove(struct platform_device *pdev)
{
	struct mtk_hsdma_device *hsdma = platform_get_drvdata(pdev);
	struct mtk_hsdma_vchan *vc;
	int i;

	/* Kill VC task */
	for (i = 0; i < hsdma->dma_requests; i++) {
		vc = &hsdma->vc[i];

		list_del(&vc->vc.chan.device_node);
		tasklet_kill(&vc->vc.task);
	}

	/* Disable DMA interrupt */
	mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0);

	/* Wait for any pending IRQ handlers to complete */
	synchronize_irq(hsdma->irq);

	/* Disable hardware */
	mtk_hsdma_hw_deinit(hsdma);

	dma_async_device_unregister(&hsdma->ddev);
	of_dma_controller_free(pdev->dev.of_node);

	return 0;
}

static struct platform_driver mtk_hsdma_driver = {
	.probe = mtk_hsdma_probe,
	.remove = mtk_hsdma_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = mtk_hsdma_match,
	},
};
module_platform_driver(mtk_hsdma_driver);

MODULE_DESCRIPTION("MediaTek High-Speed DMA Controller Driver");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_LICENSE("GPL v2");
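
/*
 * For reference, a minimal sketch of how a generic dmaengine client might
 * drive the memcpy capability registered above. It is illustrative only and
 * not part of this driver: "dev", the buffers and all error handling are
 * assumed, and a real client would normally obtain the channel through its
 * DT binding rather than by capability mask.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *	dma_addr_t src, dst;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *
 *	src = dma_map_single(dev, src_buf, len, DMA_TO_DEVICE);
 *	dst = dma_map_single(dev, dst_buf, len, DMA_FROM_DEVICE);
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);
 *
 *	dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
 *	dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
 *	dma_release_channel(chan);
 */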