// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 *   Copyright (C) 2011 John Crispin <john@phrozen.org>
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/of.h>

#include <lantiq_soc.h>
#include <xway_dma.h>

#define LTQ_DMA_ID		0x08
#define LTQ_DMA_CTRL		0x10
#define LTQ_DMA_CPOLL		0x14
#define LTQ_DMA_CS		0x18
#define LTQ_DMA_CCTRL		0x1C
#define LTQ_DMA_CDBA		0x20
#define LTQ_DMA_CDLEN		0x24
#define LTQ_DMA_CIS		0x28
#define LTQ_DMA_CIE		0x2C
#define LTQ_DMA_PS		0x40
#define LTQ_DMA_PCTRL		0x44
#define LTQ_DMA_IRNEN		0xf4

#define DMA_ID_CHNR		GENMASK(26, 20)	/* channel number */
#define DMA_DESCPT		BIT(3)		/* descriptor complete irq */
#define DMA_TX			BIT(8)		/* TX channel direction */
#define DMA_CHAN_ON		BIT(0)		/* channel on / off bit */
#define DMA_PDEN		BIT(6)		/* enable packet drop */
#define DMA_CHAN_RST		BIT(1)		/* channel reset bit */
#define DMA_RESET		BIT(0)		/* global software reset */
#define DMA_IRQ_ACK		0x7e		/* ack mask for the CIS register */
#define DMA_POLL		BIT(31)		/* turn on channel polling */
#define DMA_CLK_DIV4		BIT(6)		/* polling clock divider */
#define DMA_PCTRL_2W_BURST	0x1		/* 2 word burst length */
#define DMA_PCTRL_4W_BURST	0x2		/* 4 word burst length */
#define DMA_PCTRL_8W_BURST	0x3		/* 8 word burst length */
#define DMA_TX_BURST_SHIFT	4		/* tx burst shift */
#define DMA_RX_BURST_SHIFT	2		/* rx burst shift */
#define DMA_ETOP_ENDIANNESS	(0xf << 8)	/* endianness swap etop channels */
#define DMA_WEIGHT	(BIT(17) | BIT(16))	/* default channel weight */

#define ltq_dma_r32(x)			ltq_r32(ltq_dma_membase + (x))
#define ltq_dma_w32(x, y)		ltq_w32(x, ltq_dma_membase + (y))
#define ltq_dma_w32_mask(x, y, z)	ltq_w32_mask(x, y, \
						ltq_dma_membase + (z))

static void __iomem *ltq_dma_membase;
static DEFINE_SPINLOCK(ltq_dma_lock);

void
ltq_dma_enable_irq(struct ltq_dma_channel *ch)
{
	unsigned long flags;

	spin_lock_irqsave(&ltq_dma_lock, flags);
	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
	spin_unlock_irqrestore(&ltq_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);

void
ltq_dma_disable_irq(struct ltq_dma_channel *ch)
{
	unsigned long flags;

	spin_lock_irqsave(&ltq_dma_lock, flags);
	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
	spin_unlock_irqrestore(&ltq_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);

void
ltq_dma_ack_irq(struct ltq_dma_channel *ch)
{
	unsigned long flags;

	spin_lock_irqsave(&ltq_dma_lock, flags);
	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
	ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
	spin_unlock_irqrestore(&ltq_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);

void
ltq_dma_open(struct ltq_dma_channel *ch)
{
	unsigned long flags;

	spin_lock_irqsave(&ltq_dma_lock, flags);
	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
	ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
	spin_unlock_irqrestore(&ltq_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_open);

void
ltq_dma_close(struct ltq_dma_channel *ch)
{
	unsigned long flags;

	spin_lock_irqsave(&ltq_dma_lock, flags);
	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
	ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
	spin_unlock_irqrestore(&ltq_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_close);
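/*
 * Usage sketch (illustration only, not compiled): a client driver's
 * interrupt handler typically masks the channel, acks the status bits and
 * defers the real work to a bottom half. Everything below is hypothetical
 * driver code; my_dma_irq and the deferred-work hook are assumptions, only
 * the ltq_dma_* calls come from this file.
 */
#if 0
static irqreturn_t my_dma_irq(int irq, void *data)
{
	struct ltq_dma_channel *ch = data;

	ltq_dma_disable_irq(ch);	/* mask until the bottom half has run */
	ltq_dma_ack_irq(ch);		/* clear the channel status bits */
	/* ... schedule NAPI/tasklet, which calls ltq_dma_enable_irq() ... */
	return IRQ_HANDLED;
}
#endif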
static void
ltq_dma_alloc(struct ltq_dma_channel *ch)
{
	unsigned long flags;

	ch->desc = 0;
	ch->desc_base = dma_alloc_coherent(ch->dev,
					   LTQ_DESC_NUM * LTQ_DESC_SIZE,
					   &ch->phys, GFP_ATOMIC);

	spin_lock_irqsave(&ltq_dma_lock, flags);
	ltq_dma_w32(ch->nr, LTQ_DMA_CS);	/* select the channel */
	ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);	/* descriptor ring base */
	ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN); /* descriptor ring length */
	ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
	wmb();
	ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
	/* wait for the channel reset to complete */
	while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
		;
	spin_unlock_irqrestore(&ltq_dma_lock, flags);
}

void
ltq_dma_alloc_tx(struct ltq_dma_channel *ch)
{
	unsigned long flags;

	ltq_dma_alloc(ch);

	spin_lock_irqsave(&ltq_dma_lock, flags);
	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
	ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
	spin_unlock_irqrestore(&ltq_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);

void
ltq_dma_alloc_rx(struct ltq_dma_channel *ch)
{
	unsigned long flags;

	ltq_dma_alloc(ch);

	spin_lock_irqsave(&ltq_dma_lock, flags);
	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
	ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
	spin_unlock_irqrestore(&ltq_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);

void
ltq_dma_free(struct ltq_dma_channel *ch)
{
	if (!ch->desc_base)
		return;
	ltq_dma_close(ch);
	dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE,
			  ch->desc_base, ch->phys);
}
EXPORT_SYMBOL_GPL(ltq_dma_free);

void
ltq_dma_init_port(int p)
{
	ltq_dma_w32(p, LTQ_DMA_PS);
	switch (p) {
	case DMA_PORT_ETOP:
		/*
		 * Tell the DMA engine to swap the endianness of data frames
		 * and drop packets if the channel arbitration fails.
		 */
		ltq_dma_w32_mask(0, DMA_ETOP_ENDIANNESS | DMA_PDEN,
				 LTQ_DMA_PCTRL);
		break;

	case DMA_PORT_DEU:
		ltq_dma_w32((DMA_PCTRL_2W_BURST << DMA_TX_BURST_SHIFT) |
			    (DMA_PCTRL_2W_BURST << DMA_RX_BURST_SHIFT),
			    LTQ_DMA_PCTRL);
		break;

	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(ltq_dma_init_port);
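/*
 * Usage sketch (illustration only, not compiled): bringing up one RX and
 * one TX channel on the ETOP port. my_chan_setup and the channel numbers
 * are assumptions; only the ltq_dma_* API and DMA_PORT_ETOP come from this
 * file and xway_dma.h.
 */
#if 0
static int my_chan_setup(struct device *dev)
{
	static struct ltq_dma_channel rx = { .nr = 0 };	/* hypothetical nr */
	static struct ltq_dma_channel tx = { .nr = 1 };	/* hypothetical nr */

	rx.dev = dev;
	tx.dev = dev;
	ltq_dma_init_port(DMA_PORT_ETOP);	/* byte swap + packet drop */
	ltq_dma_alloc_rx(&rx);			/* ring + irq, RX direction */
	ltq_dma_alloc_tx(&tx);			/* ring + irq, DMA_TX set */
	ltq_dma_open(&rx);			/* start receiving */
	return 0;
}
#endif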
static int
ltq_dma_init(struct platform_device *pdev)
{
	struct clk *clk;
	struct resource *res;
	unsigned int id, nchannels;
	int i;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ltq_dma_membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ltq_dma_membase))
		panic("Failed to remap dma resource");

	/* power up and reset the dma engine */
	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		panic("Failed to get dma clock");

	clk_enable(clk);
	ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL);

	usleep_range(1, 10);

	/* disable all interrupts */
	ltq_dma_w32(0, LTQ_DMA_IRNEN);

	/* reset/configure each channel */
	id = ltq_dma_r32(LTQ_DMA_ID);
	nchannels = ((id & DMA_ID_CHNR) >> 20);
	for (i = 0; i < nchannels; i++) {
		ltq_dma_w32(i, LTQ_DMA_CS);
		ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL);
		ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
		ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
	}

	dev_info(&pdev->dev,
		 "Init done - hw rev: %X, ports: %d, channels: %d\n",
		 id & 0x1f, (id >> 16) & 0xf, nchannels);

	return 0;
}

static const struct of_device_id dma_match[] = {
	{ .compatible = "lantiq,dma-xway" },
	{},
};

static struct platform_driver dma_driver = {
	.probe = ltq_dma_init,
	.driver = {
		.name = "dma-xway",
		.of_match_table = dma_match,
	},
};

int __init
dma_init(void)
{
	return platform_driver_register(&dma_driver);
}

postcore_initcall(dma_init);
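/*
 * Usage sketch (illustration only, not compiled): handing one buffer to a
 * TX channel by filling the next ring descriptor. my_queue_tx is an
 * assumption; struct ltq_dma_desc, LTQ_DMA_OWN/SOP/EOP, LTQ_DMA_SIZE_MASK
 * and LTQ_DESC_NUM come from xway_dma.h.
 */
#if 0
static void my_queue_tx(struct ltq_dma_channel *ch, dma_addr_t buf, size_t len)
{
	struct ltq_dma_desc *desc = &ch->desc_base[ch->desc];

	desc->addr = buf;
	wmb();		/* descriptor body must be visible before OWN flips */
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		    (len & LTQ_DMA_SIZE_MASK);
	ch->desc = (ch->desc + 1) % LTQ_DESC_NUM;
}
#endif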