1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
4  * Author: Addy Ke <addy.ke@rock-chips.com>
5  */
6 
7 #include <linux/clk.h>
8 #include <linux/dmaengine.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
11 #include <linux/of.h>
12 #include <linux/pinctrl/consumer.h>
13 #include <linux/pinctrl/devinfo.h>
14 #include <linux/platform_device.h>
15 #include <linux/spi/spi.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/scatterlist.h>
18 
19 #define DRIVER_NAME "rockchip-spi"
20 
21 #define ROCKCHIP_SPI_CLR_BITS(reg, bits) writel_relaxed(readl_relaxed(reg) & ~(bits), reg)
22 #define ROCKCHIP_SPI_SET_BITS(reg, bits) writel_relaxed(readl_relaxed(reg) | (bits), reg)
23 
24 /* SPI register offsets */
25 #define ROCKCHIP_SPI_CTRLR0 0x0000
26 #define ROCKCHIP_SPI_CTRLR1 0x0004
27 #define ROCKCHIP_SPI_SSIENR 0x0008
28 #define ROCKCHIP_SPI_SER 0x000c
29 #define ROCKCHIP_SPI_BAUDR 0x0010
30 #define ROCKCHIP_SPI_TXFTLR 0x0014
31 #define ROCKCHIP_SPI_RXFTLR 0x0018
32 #define ROCKCHIP_SPI_TXFLR 0x001c
33 #define ROCKCHIP_SPI_RXFLR 0x0020
34 #define ROCKCHIP_SPI_SR 0x0024
35 #define ROCKCHIP_SPI_IPR 0x0028
36 #define ROCKCHIP_SPI_IMR 0x002c
37 #define ROCKCHIP_SPI_ISR 0x0030
38 #define ROCKCHIP_SPI_RISR 0x0034
39 #define ROCKCHIP_SPI_ICR 0x0038
40 #define ROCKCHIP_SPI_DMACR 0x003c
41 #define ROCKCHIP_SPI_DMATDLR 0x0040
42 #define ROCKCHIP_SPI_DMARDLR 0x0044
43 #define ROCKCHIP_SPI_VERSION 0x0048
44 #define ROCKCHIP_SPI_TXDR 0x0400
45 #define ROCKCHIP_SPI_RXDR 0x0800
46 
47 /* Bit fields in CTRLR0 */
48 #define CR0_DFS_OFFSET 0
49 #define CR0_DFS_4BIT 0x0
50 #define CR0_DFS_8BIT 0x1
51 #define CR0_DFS_16BIT 0x2
52 
53 #define CR0_CFS_OFFSET 2
54 
55 #define CR0_SCPH_OFFSET 6
56 
57 #define CR0_SCPOL_OFFSET 7
58 
59 #define CR0_CSM_OFFSET 8
60 #define CR0_CSM_KEEP 0x0
61 /* ss_n is held high for half a sclk_out cycle */
62 #define CR0_CSM_HALF 0x1
63 /* ss_n is held high for one sclk_out cycle */
64 #define CR0_CSM_ONE 0x2
65 
66 /* ss_n to sclk_out delay */
67 #define CR0_SSD_OFFSET 10
68 /*
69  * The period between ss_n active and
70  * sclk_out active is half a sclk_out cycle
71  */
72 #define CR0_SSD_HALF 0x0
73 /*
74  * The period between ss_n active and
75  * sclk_out active is one sclk_out cycle
76  */
77 #define CR0_SSD_ONE 0x1
78 
79 #define CR0_EM_OFFSET 11
80 #define CR0_EM_LITTLE 0x0
81 #define CR0_EM_BIG 0x1
82 
83 #define CR0_FBM_OFFSET 12
84 #define CR0_FBM_MSB 0x0
85 #define CR0_FBM_LSB 0x1
86 
87 #define CR0_BHT_OFFSET 13
88 #define CR0_BHT_16BIT 0x0
89 #define CR0_BHT_8BIT 0x1
90 
91 #define CR0_RSD_OFFSET 14
92 #define CR0_RSD_MAX 0x3
93 
94 #define CR0_FRF_OFFSET 16
95 #define CR0_FRF_SPI 0x0
96 #define CR0_FRF_SSP 0x1
97 #define CR0_FRF_MICROWIRE 0x2
98 
99 #define CR0_XFM_OFFSET 18
100 #define CR0_XFM_MASK (0x03 << CR0_XFM_OFFSET)
101 #define CR0_XFM_TR 0x0
102 #define CR0_XFM_TO 0x1
103 #define CR0_XFM_RO 0x2
104 
105 #define CR0_OPM_OFFSET 20
106 #define CR0_OPM_MASTER 0x0
107 #define CR0_OPM_SLAVE 0x1
108 
109 #define CR0_SOI_OFFSET 23
110 
111 #define CR0_MTM_OFFSET 0x21
112 
113 /* Bit fields in SER, 2bit */
114 #define SER_MASK 0x3
115 
116 /* Bit fields in BAUDR */
117 #define BAUDR_SCKDV_MIN 2
118 #define BAUDR_SCKDV_MAX 65534
119 
120 /* Bit fields in SR, 6bit */
121 #define SR_MASK 0x3f
122 #define SR_BUSY (1 << 0)
123 #define SR_TF_FULL (1 << 1)
124 #define SR_TF_EMPTY (1 << 2)
125 #define SR_RF_EMPTY (1 << 3)
126 #define SR_RF_FULL (1 << 4)
127 #define SR_SLAVE_TX_BUSY (1 << 5)
128 
129 /* Bit fields in IMR, ISR, RISR, 5bit */
130 #define INT_MASK 0x1f
131 #define INT_TF_EMPTY (1 << 0)
132 #define INT_TF_OVERFLOW (1 << 1)
133 #define INT_RF_UNDERFLOW (1 << 2)
134 #define INT_RF_OVERFLOW (1 << 3)
135 #define INT_RF_FULL (1 << 4)
136 #define INT_CS_INACTIVE (1 << 6)
137 
138 /* Bit fields in ICR, 4bit */
139 #define ICR_MASK 0x0f
140 #define ICR_ALL (1 << 0)
141 #define ICR_RF_UNDERFLOW (1 << 1)
142 #define ICR_RF_OVERFLOW (1 << 2)
143 #define ICR_TF_OVERFLOW (1 << 3)
144 
145 /* Bit fields in DMACR */
146 #define RF_DMA_EN (1 << 0)
147 #define TF_DMA_EN (1 << 1)
148 
149 /* Driver state flags */
150 #define RXDMA (1 << 0)
151 #define TXDMA (1 << 1)
152 
153 /* sclk_out: spi master internal logic in rk3x can support 50MHz */
154 #define MAX_SCLK_OUT 50000000U
155 /* max sclk of driver strength 4mA */
156 #define IO_DRIVER_4MA_MAX_SCLK_OUT 24000000U
157 
158 /*
159  * SPI_CTRLR1 is 16-bits, so we should support lengths of 0xffff + 1. However,
160  * the controller seems to hang when given 0x10000, so stick with this for now.
161  */
162 #define ROCKCHIP_SPI_MAX_TRANLEN 0xffff
163 
164 /* 2 for native cs, 2 for cs-gpio */
165 #define ROCKCHIP_SPI_MAX_CS_NUM 4
166 #define ROCKCHIP_SPI_VER2_TYPE1 0x05EC0002
167 #define ROCKCHIP_SPI_VER2_TYPE2 0x00110002
168 
169 #define ROCKCHIP_SPI_FIFO_LEN_SIXTY_FOUR 64
170 #define ROCKCHIP_SPI_FIFO_LEN_THIRTY_TWO 32
171 #define ROCKCHIP_XFER_BITS_PER_WORD_FOUR 4
172 #define ROCKCHIP_XFER_BITS_PER_WORD_EIGHT 8
173 #define ROCKCHIP_XFER_BITS_PER_WORD_SIXTEEN 16
174 #define ROCKCHIP_XFER_LEN_DIV 2
175 #define ROCKCHIP_SPI_BAUDRATE_MUL 2
176 
177 struct rockchip_spi {
178     struct device *dev;
179 
180     struct clk *spiclk;
181     struct clk *apb_pclk;
182 
183     void __iomem *regs;
184     dma_addr_t dma_addr_rx;
185     dma_addr_t dma_addr_tx;
186 
187     const void *tx;
188     void *rx;
189     unsigned int tx_left;
190     unsigned int rx_left;
191 
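    /* bitmask of RXDMA/TXDMA flags tracking which DMA channels are still in flight */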
192     atomic_t state;
193 
194     /* depth of the FIFO buffer */
195     u32 fifo_len;
196     /* frequency of spiclk */
197     u32 freq;
198 
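    /* n_bytes: bytes per spi word (1 for up to 8 bits_per_word, 2 for 16); rsd: rx sample delay in spiclk cycles (0-3) */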
199     u8 n_bytes;
200     u8 rsd;
201 
202     bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
203 
204     struct pinctrl_state *high_speed_state;
205     bool slave_abort;
206     bool cs_inactive;          /* spi slave transmission stops when cs goes inactive */
207     struct spi_transfer *xfer; /* Store xfer temporarily */
208 };
209 
210 static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
211 {
212     writel_relaxed((enable ? 1U : 0U), rs->regs + ROCKCHIP_SPI_SSIENR);
213 }
214 
215 static inline void wait_for_tx_idle(struct rockchip_spi *rs, bool slave_mode)
216 {
217     unsigned long timeout = jiffies + msecs_to_jiffies(5);
218 
219     do {
220         if (slave_mode) {
221             if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_SLAVE_TX_BUSY) &&
222                 !((readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))) {
223                 return;
224             }
225         } else {
226             if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY)) {
227                 return;
228             }
229         }
230     } while (!time_after(jiffies, timeout));
231 
232     dev_warn(rs->dev, "spi controller is in busy state!\n");
233 }
234 
235 static u32 get_fifo_len(struct rockchip_spi *rs)
236 {
237     u32 ver;
238 
239     ver = readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION);
240 
241     switch (ver) {
242         case ROCKCHIP_SPI_VER2_TYPE1:
243         case ROCKCHIP_SPI_VER2_TYPE2:
244             return ROCKCHIP_SPI_FIFO_LEN_SIXTY_FOUR;
245         default:
246             return ROCKCHIP_SPI_FIFO_LEN_THIRTY_TWO;
247     }
248 }
249 
250 static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
251 {
252     struct spi_controller *ctlr = spi->controller;
253     struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
254     bool cs_asserted = spi->mode & SPI_CS_HIGH ? enable : !enable;
255 
256     /* Return immediately for no-op */
257     if (cs_asserted == rs->cs_asserted[spi->chip_select]) {
258         return;
259     }
260 
261     if (cs_asserted) {
262         /* Keep things powered as long as CS is asserted */
263         pm_runtime_get_sync(rs->dev);
264 
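        /*
         * Even when a GPIO drives the chip select, SER must be non-zero for
         * the controller to start clocking, so set bit 0 as a placeholder.
         */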
265         if (spi->cs_gpiod) {
266             ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
267         } else {
268             ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
269         }
270     } else {
271         if (spi->cs_gpiod) {
272             ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
273         } else {
274             ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
275         }
276 
277         /* Drop reference from when we first asserted CS */
278         pm_runtime_put(rs->dev);
279     }
280 
281     rs->cs_asserted[spi->chip_select] = cs_asserted;
282 }
283 
284 static void rockchip_spi_handle_err(struct spi_controller *ctlr, struct spi_message *msg)
285 {
286     struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
287 
288     /* stop running spi transfer
289      * this also flushes both rx and tx fifos
290      */
291 
292     spi_enable_chip(rs, false);
293 
294     /* make sure all interrupts are masked */
295     writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
296 
297     if (atomic_read(&rs->state) & TXDMA) {
298         dmaengine_terminate_async(ctlr->dma_tx);
299     }
300 
301     if (atomic_read(&rs->state) & RXDMA) {
302         dmaengine_terminate_async(ctlr->dma_rx);
303     }
304 }
305 
306 static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
307 {
308     u32 tx_free = rs->fifo_len - readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFLR);
309     u32 words = min(rs->tx_left, tx_free);
310 
311     rs->tx_left -= words;
312     for (; words; words--) {
313         u32 txw;
314 
315         if (rs->n_bytes == 1) {
316             txw = *(u8 *)rs->tx;
317         } else {
318             txw = *(u16 *)rs->tx;
319         }
320 
321         writel_relaxed(txw, rs->regs + ROCKCHIP_SPI_TXDR);
322         rs->tx += rs->n_bytes;
323     }
324 }
325 
326 static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
327 {
328     u32 words = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
329     u32 rx_left = (rs->rx_left > words) ? rs->rx_left - words : 0;
330 
331     /* the hardware doesn't allow us to change fifo threshold
332      * level while spi is enabled, so instead make sure to leave
333      * enough words in the rx fifo to get the last interrupt
334      * exactly when all words have been received
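     * (e.g. with 40 words outstanding and a threshold of 16, read only
     * 24 words now and leave 16 to trigger the final interrupt)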
335      */
336     if (rx_left) {
337         u32 ftl = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFTLR) + 1;
338         if (rx_left < ftl) {
339             rx_left = ftl;
340             words = rs->rx_left - rx_left;
341         }
342     }
343 
344     rs->rx_left = rx_left;
345     for (; words; words--) {
346         u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
347 
348         if (!rs->rx) {
349             continue;
350         }
351 
352         if (rs->n_bytes == 1) {
353             *(u8 *)rs->rx = (u8)rxw;
354         } else {
355             *(u16 *)rs->rx = (u16)rxw;
356         }
357         rs->rx += rs->n_bytes;
358     }
359 }
360 
361 static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
362 {
363     struct spi_controller *ctlr = dev_id;
364     struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
365 
366     /* The cs_inactive interrupt means the master released cs, so abort the slave transfer */
367     if (rs->cs_inactive && readl_relaxed(rs->regs + ROCKCHIP_SPI_IMR) & INT_CS_INACTIVE) {
368         ctlr->slave_abort(ctlr);
369         writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
370         writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
371 
372         return IRQ_HANDLED;
373     }
374 
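    /* refill the tx fifo first, then drain rx; the transfer completes once the last rx word is read */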
375     if (rs->tx_left) {
376         rockchip_spi_pio_writer(rs);
377     }
378 
379     rockchip_spi_pio_reader(rs);
380     if (!rs->rx_left) {
381         spi_enable_chip(rs, false);
382         writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
383         writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
384         spi_finalize_current_transfer(ctlr);
385     }
386 
387     return IRQ_HANDLED;
388 }
389 
390 static int rockchip_spi_prepare_irq(struct rockchip_spi *rs, struct spi_controller *ctlr, struct spi_transfer *xfer)
391 {
392     rs->tx = xfer->tx_buf;
393     rs->rx = xfer->rx_buf;
394     rs->tx_left = rs->tx ? xfer->len / rs->n_bytes : 0;
395     rs->rx_left = xfer->len / rs->n_bytes;
396 
397     if (rs->cs_inactive) {
398         writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
399     } else {
400         writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
401     }
402     spi_enable_chip(rs, true);
403 
404     if (rs->tx_left) {
405         rockchip_spi_pio_writer(rs);
406     }
407 
408     /* 1 means the transfer is in progress */
409     return 1;
410 }
411 
412 static void rockchip_spi_dma_rxcb(void *data)
413 {
414     struct spi_controller *ctlr = data;
415     struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
416     int state = atomic_fetch_andnot(RXDMA, &rs->state);
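    /* whichever DMA callback finishes last finalizes the transfer; if TX DMA
     * is still in flight (and this is not a slave abort), let the TX callback do it
     */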
417     if (state & TXDMA && !rs->slave_abort) {
418         return;
419     }
420     if (rs->cs_inactive) {
421         writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
422     }
423     spi_enable_chip(rs, false);
424     spi_finalize_current_transfer(ctlr);
425 }
426 
427 static void rockchip_spi_dma_txcb(void *data)
428 {
429     struct spi_controller *ctlr = data;
430     struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
431     int state = atomic_fetch_andnot(TXDMA, &rs->state);
432     if (state & RXDMA && !rs->slave_abort) {
433         return;
434     }
435     /* Wait until the tx fifo has completely drained. */
436     wait_for_tx_idle(rs, ctlr->slave);
437     spi_enable_chip(rs, false);
438     spi_finalize_current_transfer(ctlr);
439 }
440 
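/*
 * Pick the largest DMA burst size (1, 2, 4 or 8 words) that evenly divides
 * data_len, e.g. a 6-word transfer yields a burst size of 2.
 */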
441 static u32 rockchip_spi_calc_burst_size(u32 data_len)
442 {
443     u32 i;
444 
445     /* burst size: 1, 2, 4, 8 */
446     for (i = 1; i < 8; i <<= 1) {
447         if (data_len & i) {
448             break;
449         }
450     }
451 
452     return i;
453 }
454 
455 static int rockchip_spi_prepare_dma(struct rockchip_spi *rs, struct spi_controller *ctlr, struct spi_transfer *xfer)
456 {
457     struct dma_async_tx_descriptor *rxdesc, *txdesc;
458 
459     atomic_set(&rs->state, 0);
460 
461     rs->tx = xfer->tx_buf;
462     rs->rx = xfer->rx_buf;
463 
464     rxdesc = NULL;
465     if (xfer->rx_buf) {
466         struct dma_slave_config rxconf = {
467             .direction = DMA_DEV_TO_MEM,
468             .src_addr = rs->dma_addr_rx,
469             .src_addr_width = rs->n_bytes,
470             .src_maxburst = rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes),
471         };
472 
473         dmaengine_slave_config(ctlr->dma_rx, &rxconf);
474 
475         rxdesc = dmaengine_prep_slave_sg(ctlr->dma_rx, xfer->rx_sg.sgl, xfer->rx_sg.nents, DMA_DEV_TO_MEM,
476                                          DMA_PREP_INTERRUPT);
477         if (!rxdesc) {
478             return -EINVAL;
479         }
480 
481         rxdesc->callback = rockchip_spi_dma_rxcb;
482         rxdesc->callback_param = ctlr;
483     }
484 
485     txdesc = NULL;
486     if (xfer->tx_buf) {
487         struct dma_slave_config txconf = {
488             .direction = DMA_MEM_TO_DEV,
489             .dst_addr = rs->dma_addr_tx,
490             .dst_addr_width = rs->n_bytes,
491             .dst_maxburst = rs->fifo_len / 4,
492         };
493 
494         dmaengine_slave_config(ctlr->dma_tx, &txconf);
495 
496         txdesc = dmaengine_prep_slave_sg(ctlr->dma_tx, xfer->tx_sg.sgl, xfer->tx_sg.nents, DMA_MEM_TO_DEV,
497                                          DMA_PREP_INTERRUPT);
498         if (!txdesc) {
499             if (rxdesc) {
500                 dmaengine_terminate_sync(ctlr->dma_rx);
501             }
502             return -EINVAL;
503         }
504 
505         txdesc->callback = rockchip_spi_dma_txcb;
506         txdesc->callback_param = ctlr;
507     }
508 
509     /* rx must be started before tx because rx data arrives as soon as tx starts clocking out */
510     if (rxdesc) {
511         atomic_or(RXDMA, &rs->state);
512         ctlr->dma_rx->cookie = dmaengine_submit(rxdesc);
513         dma_async_issue_pending(ctlr->dma_rx);
514     }
515 
516     if (rs->cs_inactive) {
517         writel_relaxed(INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
518     }
519 
520     spi_enable_chip(rs, true);
521 
522     if (txdesc) {
523         atomic_or(TXDMA, &rs->state);
524         dmaengine_submit(txdesc);
525         dma_async_issue_pending(ctlr->dma_tx);
526     }
527 
528     /* 1 means the transfer is in progress */
529     return 1;
530 }
531 
532 static int rockchip_spi_config(struct rockchip_spi *rs, struct spi_device *spi, struct spi_transfer *xfer, bool use_dma,
533                                bool slave_mode)
534 {
535     u32 cr0 = CR0_FRF_SPI << CR0_FRF_OFFSET | CR0_BHT_8BIT << CR0_BHT_OFFSET | CR0_SSD_ONE << CR0_SSD_OFFSET |
536               CR0_EM_BIG << CR0_EM_OFFSET;
537     u32 cr1;
538     u32 dmacr = 0;
539 
540     if (slave_mode) {
541         cr0 |= CR0_OPM_SLAVE << CR0_OPM_OFFSET;
542     }
543     rs->slave_abort = false;
544 
545     cr0 |= rs->rsd << CR0_RSD_OFFSET;
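    /* SPI_CPHA and SPI_CPOL are the two low mode bits and map directly onto SCPH/SCPOL */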
546     cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
547     if (spi->mode & SPI_LSB_FIRST) {
548         cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;
549     }
550     if (spi->mode & SPI_CS_HIGH) {
551         cr0 |= BIT(spi->chip_select) << CR0_SOI_OFFSET;
552     }
553 
554     if (xfer->rx_buf && xfer->tx_buf) {
555         cr0 |= CR0_XFM_TR << CR0_XFM_OFFSET;
556     } else if (xfer->rx_buf) {
557         cr0 |= CR0_XFM_RO << CR0_XFM_OFFSET;
558     } else if (use_dma) {
559         cr0 |= CR0_XFM_TO << CR0_XFM_OFFSET;
560     }
561 
562     switch (xfer->bits_per_word) {
563         case ROCKCHIP_XFER_BITS_PER_WORD_FOUR:
564             cr0 |= CR0_DFS_4BIT << CR0_DFS_OFFSET;
565             cr1 = xfer->len - 1;
566             break;
567         case ROCKCHIP_XFER_BITS_PER_WORD_EIGHT:
568             cr0 |= CR0_DFS_8BIT << CR0_DFS_OFFSET;
569             cr1 = xfer->len - 1;
570             break;
571         case ROCKCHIP_XFER_BITS_PER_WORD_SIXTEEN:
572             cr0 |= CR0_DFS_16BIT << CR0_DFS_OFFSET;
573             cr1 = xfer->len / ROCKCHIP_XFER_LEN_DIV - 1;
574             break;
575         default:
576             /* we only whitelist 4, 8 and 16 bit words in
577              * ctlr->bits_per_word_mask, so this shouldn't
578              * happen
579              */
580             dev_err(rs->dev, "unknown bits per word: %d\n", xfer->bits_per_word);
581             return -EINVAL;
582     }
583 
584     if (use_dma) {
585         if (xfer->tx_buf) {
586             dmacr |= TF_DMA_EN;
587         }
588         if (xfer->rx_buf) {
589             dmacr |= RF_DMA_EN;
590         }
591     }
592 
593     /*
594      * If speed is larger than IO_DRIVER_4MA_MAX_SCLK_OUT,
595      * set higher driver strength.
596      */
597     if (rs->high_speed_state) {
598         if (rs->freq > IO_DRIVER_4MA_MAX_SCLK_OUT) {
599             pinctrl_select_state(rs->dev->pins->p, rs->high_speed_state);
600         } else {
601             pinctrl_select_state(rs->dev->pins->p, rs->dev->pins->default_state);
602         }
603     }
604 
605     writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
606     writel_relaxed(cr1, rs->regs + ROCKCHIP_SPI_CTRLR1);
607 
608     /* unfortunately setting the fifo threshold level to generate an
609      * interrupt exactly when the fifo is full doesn't seem to work,
610      * so we need the strict inequality here
611      */
612     if ((xfer->len / rs->n_bytes) < rs->fifo_len) {
613         writel_relaxed(xfer->len / rs->n_bytes - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
614     } else {
615         writel_relaxed(rs->fifo_len / ROCKCHIP_XFER_LEN_DIV - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
616     }
617 
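    /* request tx DMA while the tx fifo is no more than half full, and rx DMA
     * as soon as a full burst worth of data is waiting in the rx fifo
     */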
618     writel_relaxed(rs->fifo_len / ROCKCHIP_XFER_LEN_DIV - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
619     writel_relaxed(rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes) - 1, rs->regs + ROCKCHIP_SPI_DMARDLR);
620     writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
621 
622     /* the hardware only supports an even clock divisor, so
623      * round divisor = spiclk / speed up to nearest even number
624      * so that the resulting speed is <= the requested speed
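     * (e.g. a 99 MHz spiclk and a 10 MHz request give a divisor of 10
     * and an effective sclk_out of 9.9 MHz)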
625      */
626     writel_relaxed(ROCKCHIP_SPI_BAUDRATE_MUL * DIV_ROUND_UP(rs->freq, ROCKCHIP_SPI_BAUDRATE_MUL * xfer->speed_hz),
627                    rs->regs + ROCKCHIP_SPI_BAUDR);
628 
629     return 0;
630 }
631 
632 static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
633 {
634     return ROCKCHIP_SPI_MAX_TRANLEN;
635 }
636 
637 static int rockchip_spi_slave_abort(struct spi_controller *ctlr)
638 {
639     struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
640     u32 rx_fifo_left;
641     struct dma_tx_state state;
642     enum dma_status status;
643 
649     /* Get current dma rx point */
650     if (atomic_read(&rs->state) & RXDMA) {
651         dmaengine_pause(ctlr->dma_rx);
652         status = dmaengine_tx_status(ctlr->dma_rx, ctlr->dma_rx->cookie, &state);
655         if (status == DMA_ERROR) {
656             rs->rx = rs->xfer->rx_buf;
657             rs->xfer->len = 0;
658             rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
659             for (; rx_fifo_left; rx_fifo_left--) {
660                 readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
661             }
662             goto out;
663         } else {
664             rs->rx += rs->xfer->len - rs->n_bytes * state.residue;
665         }
666     }
667 
668     /* Drain the valid data left in the rx fifo and set rs->xfer->len to the real rx size */
669     if (rs->rx) {
670         rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
671         for (; rx_fifo_left; rx_fifo_left--) {
672             u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
673 
674             if (rs->n_bytes == 1) {
675                 *(u8 *)rs->rx = (u8)rxw;
676             } else {
677                 *(u16 *)rs->rx = (u16)rxw;
678             }
679             rs->rx += rs->n_bytes;
680         }
681 
682         rs->xfer->len = (unsigned int)(rs->rx - rs->xfer->rx_buf);
683     }
684 
685 out:
    if (atomic_read(&rs->state) & RXDMA) {
        dmaengine_terminate_sync(ctlr->dma_rx);
    }
    if (atomic_read(&rs->state) & TXDMA) {
        dmaengine_terminate_sync(ctlr->dma_tx);
    }
686     atomic_set(&rs->state, 0);
687     spi_enable_chip(rs, false);
688     rs->slave_abort = true;
689     complete(&ctlr->xfer_completion);
690 
691     return 0;
692 }
693 
694 static int rockchip_spi_transfer_one(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer)
695 {
696     struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
697     int ret;
698     bool use_dma;
699 
700     WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) && (readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
701 
702     if (!xfer->tx_buf && !xfer->rx_buf) {
703         dev_err(rs->dev, "No buffer for transfer\n");
704         return -EINVAL;
705     }
706 
707     if (xfer->len > ROCKCHIP_SPI_MAX_TRANLEN) {
708         dev_err(rs->dev, "Transfer is too long (%d)\n", xfer->len);
709         return -EINVAL;
710     }
711 
712     rs->n_bytes = xfer->bits_per_word <= 8 ? 1 : 2;
713     rs->xfer = xfer;
714     use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
715 
716     ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
717     if (ret) {
718         return ret;
719     }
720 
721     if (use_dma) {
722         return rockchip_spi_prepare_dma(rs, ctlr, xfer);
723     }
724 
725     return rockchip_spi_prepare_irq(rs, ctlr, xfer);
726 }
727 
728 static bool rockchip_spi_can_dma(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer)
729 {
730     struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
731     unsigned int bytes_per_word = xfer->bits_per_word <= 8 ? 1 : 2;
732 
733     /* if the number of spi words to transfer is less than the fifo
734      * length we can just fill the fifo and wait for a single irq,
735      * so don't bother setting up dma
736      */
737     return xfer->len / bytes_per_word >= rs->fifo_len;
738 }
739 
740 static int rockchip_spi_probe(struct platform_device *pdev)
741 {
742     int ret;
743     struct rockchip_spi *rs;
744     struct spi_controller *ctlr;
745     struct resource *mem;
746     struct device_node *np = pdev->dev.of_node;
747     u32 rsd_nsecs, num_cs;
748     bool slave_mode;
749     struct pinctrl *pinctrl = NULL;
750     slave_mode = of_property_read_bool(np, "spi-slave");
751     if (slave_mode) {
752         ctlr = spi_alloc_slave(&pdev->dev, sizeof(struct rockchip_spi));
753     } else {
754         ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi));
755     }
756     if (!ctlr) {
757         return -ENOMEM;
758     }
759 
760     platform_set_drvdata(pdev, ctlr);
761 
762     rs = spi_controller_get_devdata(ctlr);
763     ctlr->slave = slave_mode;
764 
765     /* Get basic io resource and map it */
766     mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
767     rs->regs = devm_ioremap_resource(&pdev->dev, mem);
768     if (IS_ERR(rs->regs)) {
769         ret = PTR_ERR(rs->regs);
770         goto err_put_ctlr;
771     }
772 
773     rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
774     if (IS_ERR(rs->apb_pclk)) {
775         dev_err(&pdev->dev, "Failed to get apb_pclk\n");
776         ret = PTR_ERR(rs->apb_pclk);
777         goto err_put_ctlr;
778     }
779 
780     rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
781     if (IS_ERR(rs->spiclk)) {
782         dev_err(&pdev->dev, "Failed to get spi_pclk\n");
783         ret = PTR_ERR(rs->spiclk);
784         goto err_put_ctlr;
785     }
786 
787     ret = clk_prepare_enable(rs->apb_pclk);
788     if (ret < 0) {
789         dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
790         goto err_put_ctlr;
791     }
792 
793     ret = clk_prepare_enable(rs->spiclk);
794     if (ret < 0) {
795         dev_err(&pdev->dev, "Failed to enable spi_clk\n");
796         goto err_disable_apbclk;
797     }
798 
799     spi_enable_chip(rs, false);
800 
801     ret = platform_get_irq(pdev, 0);
802     if (ret < 0) {
803         goto err_disable_spiclk;
804     }
805 
806     ret = devm_request_threaded_irq(&pdev->dev, ret, rockchip_spi_isr, NULL, IRQF_ONESHOT, dev_name(&pdev->dev), ctlr);
807     if (ret) {
808         goto err_disable_spiclk;
809     }
810 
811     rs->dev = &pdev->dev;
812     rs->freq = clk_get_rate(rs->spiclk);
813 
814     if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns", &rsd_nsecs)) {
815         /* rx sample delay is expressed in parent clock cycles (max 3) */
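        /* freq and the 10^9 ns divisor are both pre-scaled by 256 so the 32-bit multiply cannot overflow */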
816         u32 rsd = DIV_ROUND_CLOSEST(rsd_nsecs * (rs->freq >> 8), 1000000000 >> 8);
817         if (!rsd) {
818             dev_warn(rs->dev, "%u Hz are too slow to express %u ns delay\n", rs->freq, rsd_nsecs);
819         } else if (rsd > CR0_RSD_MAX) {
820             rsd = CR0_RSD_MAX;
821             dev_warn(rs->dev, "%u Hz are too fast to express %u ns delay, clamping at %u ns\n", rs->freq, rsd_nsecs,
822                      CR0_RSD_MAX * 1000000000U / rs->freq);
823         }
824         rs->rsd = rsd;
825     }
826 
827     rs->fifo_len = get_fifo_len(rs);
828     if (!rs->fifo_len) {
829         dev_err(&pdev->dev, "Failed to get fifo length\n");
830         ret = -EINVAL;
831         goto err_disable_spiclk;
832     }
833 
834     pm_runtime_set_active(&pdev->dev);
835     pm_runtime_enable(&pdev->dev);
836 
837     ctlr->auto_runtime_pm = true;
838     ctlr->bus_num = pdev->id;
839     ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_LSB_FIRST;
840     if (slave_mode) {
841         ctlr->mode_bits |= SPI_NO_CS;
842         ctlr->slave_abort = rockchip_spi_slave_abort;
843     } else {
844         ctlr->flags = SPI_MASTER_GPIO_SS;
845         ctlr->max_native_cs = ROCKCHIP_SPI_MAX_CS_NUM;
846         /*
847          * rk spi0 has two native cs, spi1..5 one cs only
848          * if num-cs is missing in the dts, default to 1
849          */
850         if (of_property_read_u32(np, "num-cs", &num_cs))
851             num_cs = 1;
852         ctlr->num_chipselect = num_cs;
853         ctlr->use_gpio_descriptors = true;
854     }
855     ctlr->dev.of_node = pdev->dev.of_node;
856     ctlr->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8) | SPI_BPW_MASK(4);
857     ctlr->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX;
858     ctlr->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT);
859 
860     ctlr->set_cs = rockchip_spi_set_cs;
861     ctlr->transfer_one = rockchip_spi_transfer_one;
862     ctlr->max_transfer_size = rockchip_spi_max_transfer_size;
863     ctlr->handle_err = rockchip_spi_handle_err;
864 
865     ctlr->dma_tx = dma_request_chan(rs->dev, "tx");
866     if (IS_ERR(ctlr->dma_tx)) {
867         /* Check tx to see if we need to defer probing the driver */
868         if (PTR_ERR(ctlr->dma_tx) == -EPROBE_DEFER) {
869             ret = -EPROBE_DEFER;
870             goto err_disable_pm_runtime;
871         }
872         dev_warn(rs->dev, "Failed to request TX DMA channel\n");
873         ctlr->dma_tx = NULL;
874     }
875 
876     ctlr->dma_rx = dma_request_chan(rs->dev, "rx");
877     if (IS_ERR(ctlr->dma_rx)) {
878         if (PTR_ERR(ctlr->dma_rx) == -EPROBE_DEFER) {
879             ret = -EPROBE_DEFER;
880             goto err_free_dma_tx;
881         }
882         dev_warn(rs->dev, "Failed to request RX DMA channel\n");
883         ctlr->dma_rx = NULL;
884     }
885 
886     if (ctlr->dma_tx && ctlr->dma_rx) {
887         rs->dma_addr_tx = mem->start + ROCKCHIP_SPI_TXDR;
888         rs->dma_addr_rx = mem->start + ROCKCHIP_SPI_RXDR;
889         ctlr->can_dma = rockchip_spi_can_dma;
890     }
891 
892     switch (readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION)) {
893         case ROCKCHIP_SPI_VER2_TYPE2:
894             ctlr->mode_bits |= SPI_CS_HIGH;
895             if (ctlr->can_dma && slave_mode) {
896                 rs->cs_inactive = true;
897             } else {
898                 rs->cs_inactive = false;
899             }
900             break;
901         default:
902             rs->cs_inactive = false;
903             break;
904     }
905 
906     pinctrl = devm_pinctrl_get(&pdev->dev);
907     if (!IS_ERR(pinctrl)) {
908         rs->high_speed_state = pinctrl_lookup_state(pinctrl, "high_speed");
909         if (IS_ERR_OR_NULL(rs->high_speed_state)) {
910             dev_warn(&pdev->dev, "no high_speed pinctrl state\n");
911             rs->high_speed_state = NULL;
912         }
913     }
914 
915     ret = devm_spi_register_controller(&pdev->dev, ctlr);
916     if (ret < 0) {
917         dev_err(&pdev->dev, "Failed to register controller\n");
918         goto err_free_dma_rx;
919     }
920 
921     return 0;
922 
923 err_free_dma_rx:
924     if (ctlr->dma_rx) {
925         dma_release_channel(ctlr->dma_rx);
926     }
927 err_free_dma_tx:
928     if (ctlr->dma_tx) {
929         dma_release_channel(ctlr->dma_tx);
930     }
931 err_disable_pm_runtime:
932     pm_runtime_disable(&pdev->dev);
933 err_disable_spiclk:
934     clk_disable_unprepare(rs->spiclk);
935 err_disable_apbclk:
936     clk_disable_unprepare(rs->apb_pclk);
937 err_put_ctlr:
938     spi_controller_put(ctlr);
939 
940     return ret;
941 }
942 
943 static int rockchip_spi_remove(struct platform_device *pdev)
944 {
945     struct spi_controller *ctlr = spi_controller_get(platform_get_drvdata(pdev));
946     struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
947 
948     pm_runtime_get_sync(&pdev->dev);
949 
950     clk_disable_unprepare(rs->spiclk);
951     clk_disable_unprepare(rs->apb_pclk);
952 
953     pm_runtime_put_noidle(&pdev->dev);
954     pm_runtime_disable(&pdev->dev);
955     pm_runtime_set_suspended(&pdev->dev);
956 
957     if (ctlr->dma_tx) {
958         dma_release_channel(ctlr->dma_tx);
959     }
960     if (ctlr->dma_rx) {
961         dma_release_channel(ctlr->dma_rx);
962     }
963 
964     spi_controller_put(ctlr);
965 
966     return 0;
967 }
968 
969 #ifdef CONFIG_PM_SLEEP
970 static int rockchip_spi_suspend(struct device *dev)
971 {
972     int ret;
973     struct spi_controller *ctlr = dev_get_drvdata(dev);
974 
975     ret = spi_controller_suspend(ctlr);
976     if (ret < 0) {
977         return ret;
978     }
979 
980     ret = pm_runtime_force_suspend(dev);
981     if (ret < 0) {
982         return ret;
983     }
984 
985     pinctrl_pm_select_sleep_state(dev);
986 
987     return 0;
988 }
989 
990 static int rockchip_spi_resume(struct device *dev)
991 {
992     int ret;
993     struct spi_controller *ctlr = dev_get_drvdata(dev);
994     struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
995 
996     pinctrl_pm_select_default_state(dev);
997 
998     ret = pm_runtime_force_resume(dev);
999     if (ret < 0) {
1000         return ret;
1001     }
1002 
1003     ret = spi_controller_resume(ctlr);
1004     if (ret < 0) {
1005         clk_disable_unprepare(rs->spiclk);
1006         clk_disable_unprepare(rs->apb_pclk);
1007     }
1008 
1009     return ret;
1010 }
1011 #endif /* CONFIG_PM_SLEEP */
1012 
1013 #ifdef CONFIG_PM
1014 static int rockchip_spi_runtime_suspend(struct device *dev)
1015 {
1016     struct spi_controller *ctlr = dev_get_drvdata(dev);
1017     struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
1018 
1019     clk_disable_unprepare(rs->spiclk);
1020     clk_disable_unprepare(rs->apb_pclk);
1021 
1022     return 0;
1023 }
1024 
1025 static int rockchip_spi_runtime_resume(struct device *dev)
1026 {
1027     int ret;
1028     struct spi_controller *ctlr = dev_get_drvdata(dev);
1029     struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
1030 
1031     ret = clk_prepare_enable(rs->apb_pclk);
1032     if (ret < 0) {
1033         return ret;
1034     }
1035 
1036     ret = clk_prepare_enable(rs->spiclk);
1037     if (ret < 0) {
1038         clk_disable_unprepare(rs->apb_pclk);
1039     }
1040 
1041     return ret;
1042 }
1043 #endif /* CONFIG_PM */
1044 
1045 static const struct dev_pm_ops rockchip_spi_pm = {
1046     SET_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
1047     SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend, rockchip_spi_runtime_resume, NULL)
};
1048 
1049 static const struct of_device_id rockchip_spi_dt_match[] = {
    { .compatible = "rockchip,px30-spi", },
    { .compatible = "rockchip,rk3036-spi", },
    { .compatible = "rockchip,rk3066-spi", },
    { .compatible = "rockchip,rk3188-spi", },
    { .compatible = "rockchip,rk3228-spi", },
    { .compatible = "rockchip,rk3288-spi", },
    { .compatible = "rockchip,rk3308-spi", },
    { .compatible = "rockchip,rk3328-spi", },
    { .compatible = "rockchip,rk3368-spi", },
    { .compatible = "rockchip,rk3399-spi", },
    { .compatible = "rockchip,rv1108-spi", },
    { .compatible = "rockchip,rv1126-spi", },
    { },
1087 };
1088 MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
1089 
1090 static struct platform_driver rockchip_spi_driver = {
1091     .driver =
1092         {
1093             .name = DRIVER_NAME,
1094             .pm = &rockchip_spi_pm,
1095             .of_match_table = of_match_ptr(rockchip_spi_dt_match),
1096         },
1097     .probe = rockchip_spi_probe,
1098     .remove = rockchip_spi_remove,
1099 };
1100 
1101 module_platform_driver(rockchip_spi_driver);
1102 
1103 MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
1104 MODULE_DESCRIPTION("ROCKCHIP SPI Controller Driver");
1105 MODULE_LICENSE("GPL v2");
1106