// SPDX-License-Identifier: GPL-2.0
/*
 * QMC driver
 *
 * Copyright 2022 CS GROUP France
 *
 * Author: Herve Codina <herve.codina@bootlin.com>
 */

#include <soc/fsl/qe/qmc.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <soc/fsl/cpm.h>
#include <sysdev/fsl_soc.h>
#include "tsa.h"

/* SCC general mode register low (32 bits) */
#define SCC_GSMRL	0x00
#define SCC_GSMRL_ENR		(1 << 5)
#define SCC_GSMRL_ENT		(1 << 4)
#define SCC_GSMRL_MODE_QMC	(0x0A << 0)

/* SCC general mode register high (32 bits) */
#define SCC_GSMRH	0x04
#define   SCC_GSMRH_CTSS	(1 << 7)
#define   SCC_GSMRH_CDS		(1 << 8)
#define   SCC_GSMRH_CTSP	(1 << 9)
#define   SCC_GSMRH_CDP		(1 << 10)

/* SCC event register (16 bits) */
#define SCC_SCCE	0x10
#define   SCC_SCCE_IQOV		(1 << 3)
#define   SCC_SCCE_GINT		(1 << 2)
#define   SCC_SCCE_GUN		(1 << 1)
#define   SCC_SCCE_GOV		(1 << 0)

/* SCC mask register (16 bits) */
#define SCC_SCCM	0x14
/* Multichannel base pointer (32 bits) */
#define QMC_GBL_MCBASE		0x00
/* Multichannel controller state (16 bits) */
#define QMC_GBL_QMCSTATE	0x04
/* Maximum receive buffer length (16 bits) */
#define QMC_GBL_MRBLR		0x06
/* Tx time-slot assignment table pointer (16 bits) */
#define QMC_GBL_TX_S_PTR	0x08
/* Rx pointer (16 bits) */
#define QMC_GBL_RXPTR		0x0A
/* Global receive frame threshold (16 bits) */
#define QMC_GBL_GRFTHR		0x0C
/* Global receive frame count (16 bits) */
#define QMC_GBL_GRFCNT		0x0E
/* Multichannel interrupt base address (32 bits) */
#define QMC_GBL_INTBASE		0x10
/* Multichannel interrupt pointer (32 bits) */
#define QMC_GBL_INTPTR		0x14
/* Rx time-slot assignment table pointer (16 bits) */
#define QMC_GBL_RX_S_PTR	0x18
/* Tx pointer (16 bits) */
#define QMC_GBL_TXPTR		0x1A
/* CRC constant (32 bits) */
#define QMC_GBL_C_MASK32	0x1C
/* Time slot assignment table Rx (32 x 16 bits) */
#define QMC_GBL_TSATRX		0x20
/* Time slot assignment table Tx (32 x 16 bits) */
#define QMC_GBL_TSATTX		0x60
/* CRC constant (16 bits) */
#define QMC_GBL_C_MASK16	0xA0

/* TSA entry (16-bit entry in TSATRX and TSATTX) */
#define QMC_TSA_VALID		(1 << 15)
#define QMC_TSA_WRAP		(1 << 14)
#define QMC_TSA_MASK		(0x303F)
#define QMC_TSA_CHANNEL(x)	((x) << 6)

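/*
 * Worked example (values derived from the definitions above): the entry
 * for channel 5 is QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(5)
 * = 0x8000 | 0x303F | 0x0140 = 0xB17F. The last entry of a table also
 * gets QMC_TSA_WRAP ORed in (see the qmc_setup_tsa_* helpers below).
 */
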
/* Tx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_TBASE	0x00

/* Channel mode register (16 bits) */
#define QMC_SPE_CHAMR	0x02
#define   QMC_SPE_CHAMR_MODE_HDLC	(1 << 15)
#define   QMC_SPE_CHAMR_MODE_TRANSP	((0 << 15) | (1 << 13))
#define   QMC_SPE_CHAMR_ENT		(1 << 12)
#define   QMC_SPE_CHAMR_POL		(1 << 8)
#define   QMC_SPE_CHAMR_HDLC_IDLM	(1 << 13)
#define   QMC_SPE_CHAMR_HDLC_CRC	(1 << 7)
#define   QMC_SPE_CHAMR_HDLC_NOF	(0x0f << 0)
#define   QMC_SPE_CHAMR_TRANSP_RD	(1 << 14)
#define   QMC_SPE_CHAMR_TRANSP_SYNC	(1 << 10)

/* Tx internal state (32 bits) */
#define QMC_SPE_TSTATE	0x04
/* Tx buffer descriptor pointer (16 bits) */
#define QMC_SPE_TBPTR	0x0C
/* Zero-insertion state (32 bits) */
#define QMC_SPE_ZISTATE	0x14
/* Channel's interrupt mask flags (16 bits) */
#define QMC_SPE_INTMSK	0x1C
/* Rx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_RBASE	0x20
/* HDLC: Maximum frame length register (16 bits) */
#define QMC_SPE_MFLR	0x22
/* TRANSPARENT: Transparent maximum receive length (16 bits) */
#define QMC_SPE_TMRBLR	0x22
/* Rx internal state (32 bits) */
#define QMC_SPE_RSTATE	0x24
/* Rx buffer descriptor pointer (16 bits) */
#define QMC_SPE_RBPTR	0x2C
/* Packs 4 bytes to 1 long word before writing to buffer (32 bits) */
#define QMC_SPE_RPACK	0x30
/* Zero deletion state (32 bits) */
#define QMC_SPE_ZDSTATE	0x34

/* Transparent synchronization (16 bits) */
#define QMC_SPE_TRNSYNC 0x3C
#define   QMC_SPE_TRNSYNC_RX(x)	((x) << 8)
#define   QMC_SPE_TRNSYNC_TX(x)	((x) << 0)

/* Interrupt related registers bits */
#define QMC_INT_V		(1 << 15)
#define QMC_INT_W		(1 << 14)
#define QMC_INT_NID		(1 << 13)
#define QMC_INT_IDL		(1 << 12)
#define QMC_INT_GET_CHANNEL(x)	(((x) & 0x0FC0) >> 6)
#define QMC_INT_MRF		(1 << 5)
#define QMC_INT_UN		(1 << 4)
#define QMC_INT_RXF		(1 << 3)
#define QMC_INT_BSY		(1 << 2)
#define QMC_INT_TXB		(1 << 1)
#define QMC_INT_RXB		(1 << 0)

/* BD related registers bits */
#define QMC_BD_RX_E	(1 << 15)
#define QMC_BD_RX_W	(1 << 13)
#define QMC_BD_RX_I	(1 << 12)
#define QMC_BD_RX_L	(1 << 11)
#define QMC_BD_RX_F	(1 << 10)
#define QMC_BD_RX_CM	(1 << 9)
#define QMC_BD_RX_UB	(1 << 7)
#define QMC_BD_RX_LG	(1 << 5)
#define QMC_BD_RX_NO	(1 << 4)
#define QMC_BD_RX_AB	(1 << 3)
#define QMC_BD_RX_CR	(1 << 2)

#define QMC_BD_TX_R	(1 << 15)
#define QMC_BD_TX_W	(1 << 13)
#define QMC_BD_TX_I	(1 << 12)
#define QMC_BD_TX_L	(1 << 11)
#define QMC_BD_TX_TC	(1 << 10)
#define QMC_BD_TX_CM	(1 << 9)
#define QMC_BD_TX_UB	(1 << 7)
#define QMC_BD_TX_PAD	(0x0f << 0)

/* Number of BDs and interrupt items */
#define QMC_NB_TXBDS	8
#define QMC_NB_RXBDS	8
#define QMC_NB_INTS	128

struct qmc_xfer_desc {
	union {
		void (*tx_complete)(void *context);
		void (*rx_complete)(void *context, size_t length);
	};
	void *context;
};

struct qmc_chan {
	struct list_head list;
	unsigned int id;
	struct qmc *qmc;
	void __iomem *s_param;
	enum qmc_mode mode;
	u64	tx_ts_mask;
	u64	rx_ts_mask;
	bool is_reverse_data;

	spinlock_t	tx_lock;
	cbd_t __iomem *txbds;
	cbd_t __iomem *txbd_free;
	cbd_t __iomem *txbd_done;
	struct qmc_xfer_desc tx_desc[QMC_NB_TXBDS];
	u64	nb_tx_underrun;
	bool	is_tx_stopped;

	spinlock_t	rx_lock;
	cbd_t __iomem *rxbds;
	cbd_t __iomem *rxbd_free;
	cbd_t __iomem *rxbd_done;
	struct qmc_xfer_desc rx_desc[QMC_NB_RXBDS];
	u64	nb_rx_busy;
	int	rx_pending;
	bool	is_rx_halted;
	bool	is_rx_stopped;
};

struct qmc {
	struct device *dev;
	struct tsa_serial *tsa_serial;
	void __iomem *scc_regs;
	void __iomem *scc_pram;
	void __iomem *dpram;
	u16 scc_pram_offset;
	cbd_t __iomem *bd_table;
	dma_addr_t bd_dma_addr;
	size_t bd_size;
	u16 __iomem *int_table;
	u16 __iomem *int_curr;
	dma_addr_t int_dma_addr;
	size_t int_size;
	struct list_head chan_head;
	struct qmc_chan *chans[64];
};

static inline void qmc_write16(void __iomem *addr, u16 val)
{
	iowrite16be(val, addr);
}

static inline u16 qmc_read16(void __iomem *addr)
{
	return ioread16be(addr);
}

static inline void qmc_setbits16(void __iomem *addr, u16 set)
{
	qmc_write16(addr, qmc_read16(addr) | set);
}

static inline void qmc_clrbits16(void __iomem *addr, u16 clr)
{
	qmc_write16(addr, qmc_read16(addr) & ~clr);
}

static inline void qmc_write32(void __iomem *addr, u32 val)
{
	iowrite32be(val, addr);
}

static inline u32 qmc_read32(void __iomem *addr)
{
	return ioread32be(addr);
}

static inline void qmc_setbits32(void __iomem *addr, u32 set)
{
	qmc_write32(addr, qmc_read32(addr) | set);
}

static inline void qmc_clrbits32(void __iomem *addr, u32 clr)
{
	qmc_write32(addr, qmc_read32(addr) & ~clr);
}

int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
{
	struct tsa_serial_info tsa_info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &tsa_info);
	if (ret)
		return ret;

	info->mode = chan->mode;
	info->rx_fs_rate = tsa_info.rx_fs_rate;
	info->rx_bit_rate = tsa_info.rx_bit_rate;
	info->nb_tx_ts = hweight64(chan->tx_ts_mask);
	info->tx_fs_rate = tsa_info.tx_fs_rate;
	info->tx_bit_rate = tsa_info.tx_bit_rate;
	info->nb_rx_ts = hweight64(chan->rx_ts_mask);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_get_info);
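
/*
 * Usage sketch (illustrative only, not part of the driver): a consumer
 * holding a channel from qmc_chan_get_byphandle() could query it like
 * this. 'chan' and 'dev' are hypothetical consumer-side variables.
 *
 *	struct qmc_chan_info info;
 *	int ret;
 *
 *	ret = qmc_chan_get_info(chan, &info);
 *	if (ret)
 *		return ret;
 *	dev_dbg(dev, "%u Tx TS at %u bit/s\n", info.nb_tx_ts, info.tx_bit_rate);
 */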

int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
{
	if (param->mode != chan->mode)
		return -EINVAL;

	switch (param->mode) {
	case QMC_HDLC:
		if ((param->hdlc.max_rx_buf_size % 4) ||
		    (param->hdlc.max_rx_buf_size < 8))
			return -EINVAL;

		qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR,
			    param->hdlc.max_rx_buf_size - 8);
		qmc_write16(chan->s_param + QMC_SPE_MFLR,
			    param->hdlc.max_rx_frame_size);
		if (param->hdlc.is_crc32) {
			qmc_setbits16(chan->s_param + QMC_SPE_CHAMR,
				      QMC_SPE_CHAMR_HDLC_CRC);
		} else {
			qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR,
				      QMC_SPE_CHAMR_HDLC_CRC);
		}
		break;

	case QMC_TRANSPARENT:
		qmc_write16(chan->s_param + QMC_SPE_TMRBLR,
			    param->transp.max_rx_buf_size);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(qmc_chan_set_param);
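
/*
 * Usage sketch (illustrative only; assumes the channel runs in HDLC
 * mode). The field names come from the qmc_chan_set_param() cases above;
 * max_rx_buf_size must be a multiple of 4 and at least 8.
 *
 *	struct qmc_chan_param param = {
 *		.mode = QMC_HDLC,
 *		.hdlc.max_rx_buf_size = 1520,
 *		.hdlc.max_rx_frame_size = 1518,
 *		.hdlc.is_crc32 = true,
 *	};
 *	int ret = qmc_chan_set_param(chan, &param);
 */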

int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
			  void (*complete)(void *context), void *context)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;
	int ret;

	/*
	 * R bit  UB bit
	 *   0       0  : The BD is free
	 *   1       1  : The BD is in use, waiting for transfer
	 *   0       1  : The BD is in use, waiting for completion
	 *   1       0  : Should not happen
	 */

	spin_lock_irqsave(&chan->tx_lock, flags);
	bd = chan->txbd_free;

	ctrl = qmc_read16(&bd->cbd_sc);
	if (ctrl & (QMC_BD_TX_R | QMC_BD_TX_UB)) {
		/* We are full ... */
		ret = -EBUSY;
		goto end;
	}

	qmc_write16(&bd->cbd_datlen, length);
	qmc_write32(&bd->cbd_bufaddr, addr);

	xfer_desc = &chan->tx_desc[bd - chan->txbds];
	xfer_desc->tx_complete = complete;
	xfer_desc->context = context;

	/* Activate the descriptor */
	ctrl |= (QMC_BD_TX_R | QMC_BD_TX_UB);
	wmb(); /* Be sure to flush the descriptor before control update */
	qmc_write16(&bd->cbd_sc, ctrl);

	if (!chan->is_tx_stopped)
		qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);

	if (ctrl & QMC_BD_TX_W)
		chan->txbd_free = chan->txbds;
	else
		chan->txbd_free++;

	ret = 0;

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_write_submit);
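
/*
 * Usage sketch (illustrative only): submit one buffer for transmission.
 * The caller owns the DMA mapping; 'dev', 'buf', 'len' and tx_done() are
 * hypothetical consumer-side names.
 *
 *	static void tx_done(void *context)
 *	{
 *		// context is the pointer passed to qmc_chan_write_submit()
 *	}
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	ret = qmc_chan_write_submit(chan, addr, len, tx_done, buf);
 *	if (ret == -EBUSY)
 *		// all 8 Tx BDs are in flight, retry after a completion
 */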

static void qmc_chan_write_done(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	void (*complete)(void *context);
	unsigned long flags;
	void *context;
	cbd_t __iomem *bd;
	u16 ctrl;

	/*
	 * R bit  UB bit
	 *   0       0  : The BD is free
	 *   1       1  : The BD is in use, waiting for transfer
	 *   0       1  : The BD is in use, waiting for completion
	 *   1       0  : Should not happen
	 */

	spin_lock_irqsave(&chan->tx_lock, flags);
	bd = chan->txbd_done;

	ctrl = qmc_read16(&bd->cbd_sc);
	while (!(ctrl & QMC_BD_TX_R)) {
		if (!(ctrl & QMC_BD_TX_UB))
			goto end;

		xfer_desc = &chan->tx_desc[bd - chan->txbds];
		complete = xfer_desc->tx_complete;
		context = xfer_desc->context;
		xfer_desc->tx_complete = NULL;
		xfer_desc->context = NULL;

		qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_TX_UB);

		if (ctrl & QMC_BD_TX_W)
			chan->txbd_done = chan->txbds;
		else
			chan->txbd_done++;

		if (complete) {
			spin_unlock_irqrestore(&chan->tx_lock, flags);
			complete(context);
			spin_lock_irqsave(&chan->tx_lock, flags);
		}

		bd = chan->txbd_done;
		ctrl = qmc_read16(&bd->cbd_sc);
	}

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
			 void (*complete)(void *context, size_t length), void *context)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;
	int ret;

	/*
	 * E bit  UB bit
	 *   0       0  : The BD is free
	 *   1       1  : The BD is in use, waiting for transfer
	 *   0       1  : The BD is in use, waiting for completion
	 *   1       0  : Should not happen
	 */

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbd_free;

	ctrl = qmc_read16(&bd->cbd_sc);
	if (ctrl & (QMC_BD_RX_E | QMC_BD_RX_UB)) {
		/* We are full ... */
		ret = -EBUSY;
		goto end;
	}

	qmc_write16(&bd->cbd_datlen, 0); /* data length is updated by the QMC */
	qmc_write32(&bd->cbd_bufaddr, addr);

	xfer_desc = &chan->rx_desc[bd - chan->rxbds];
	xfer_desc->rx_complete = complete;
	xfer_desc->context = context;

	/* Activate the descriptor */
	ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
	wmb(); /* Be sure to flush data before descriptor activation */
	qmc_write16(&bd->cbd_sc, ctrl);

	/* Restart receiver if needed */
	if (chan->is_rx_halted && !chan->is_rx_stopped) {
		/* Restart receiver */
		if (chan->mode == QMC_TRANSPARENT)
			qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
		else
			qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
		qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
		chan->is_rx_halted = false;
	}
	chan->rx_pending++;

	if (ctrl & QMC_BD_RX_W)
		chan->rxbd_free = chan->rxbds;
	else
		chan->rxbd_free++;

	ret = 0;
end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_read_submit);
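
/*
 * Usage sketch (illustrative only): queue one receive buffer. rx_done()
 * is a hypothetical consumer callback; it receives the number of bytes
 * written by the QMC (taken from the BD data length).
 *
 *	static void rx_done(void *context, size_t length)
 *	{
 *		// 'length' bytes are now available in the buffer
 *	}
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, buf_size, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	ret = qmc_chan_read_submit(chan, addr, buf_size, rx_done, buf);
 */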

static void qmc_chan_read_done(struct qmc_chan *chan)
{
	void (*complete)(void *context, size_t size);
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	void *context;
	u16 datalen;
	u16 ctrl;

	/*
	 * E bit  UB bit
	 *   0       0  : The BD is free
	 *   1       1  : The BD is in use, waiting for transfer
	 *   0       1  : The BD is in use, waiting for completion
	 *   1       0  : Should not happen
	 */

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbd_done;

	ctrl = qmc_read16(&bd->cbd_sc);
	while (!(ctrl & QMC_BD_RX_E)) {
		if (!(ctrl & QMC_BD_RX_UB))
			goto end;

		xfer_desc = &chan->rx_desc[bd - chan->rxbds];
		complete = xfer_desc->rx_complete;
		context = xfer_desc->context;
		xfer_desc->rx_complete = NULL;
		xfer_desc->context = NULL;

		datalen = qmc_read16(&bd->cbd_datlen);
		qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_RX_UB);

		if (ctrl & QMC_BD_RX_W)
			chan->rxbd_done = chan->rxbds;
		else
			chan->rxbd_done++;

		chan->rx_pending--;

		if (complete) {
			spin_unlock_irqrestore(&chan->rx_lock, flags);
			complete(context, datalen);
			spin_lock_irqsave(&chan->rx_lock, flags);
		}

		bd = chan->rxbd_done;
		ctrl = qmc_read16(&bd->cbd_sc);
	}

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
{
	/*
	 * Issue the QMC opcode for this channel; 0x0 (STOP RECEIVE) and
	 * 0x1 (STOP TRANSMIT) are the opcodes used by the helpers below.
	 */
	return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
}

static int qmc_chan_stop_rx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->rx_lock, flags);

	/* Send STOP RECEIVE command */
	ret = qmc_chan_command(chan, 0x0);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	chan->is_rx_stopped = true;

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
	return ret;
}

static int qmc_chan_stop_tx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->tx_lock, flags);

	/* Send STOP TRANSMIT command */
	ret = qmc_chan_command(chan, 0x1);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	chan->is_tx_stopped = true;

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
	return ret;
}

int qmc_chan_stop(struct qmc_chan *chan, int direction)
{
	int ret;

	if (direction & QMC_CHAN_READ) {
		ret = qmc_chan_stop_rx(chan);
		if (ret)
			return ret;
	}

	if (direction & QMC_CHAN_WRITE) {
		ret = qmc_chan_stop_tx(chan);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(qmc_chan_stop);
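
/*
 * Usage sketch (illustrative only): stop a channel in both directions.
 * QMC_CHAN_READ, QMC_CHAN_WRITE and QMC_CHAN_ALL come from
 * <soc/fsl/qe/qmc.h>; QMC_CHAN_ALL is used the same way by
 * qmc_finalize_chans() below.
 *
 *	ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
 *	if (ret)
 *		return ret;
 */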

static void qmc_chan_start_rx(struct qmc_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->rx_lock, flags);

	/* Restart the receiver */
	if (chan->mode == QMC_TRANSPARENT)
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
	else
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
	qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
	chan->is_rx_halted = false;

	chan->is_rx_stopped = false;

	spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static void qmc_chan_start_tx(struct qmc_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->tx_lock, flags);

	/*
	 * Enable the channel transmitter as it could have been disabled by
	 * qmc_chan_reset().
	 */
	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);

	/* Set the POL bit in the channel mode register */
	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);

	chan->is_tx_stopped = false;

	spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_start(struct qmc_chan *chan, int direction)
{
	if (direction & QMC_CHAN_READ)
		qmc_chan_start_rx(chan);

	if (direction & QMC_CHAN_WRITE)
		qmc_chan_start_tx(chan);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_start);

static void qmc_chan_reset_rx(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbds;
	do {
		ctrl = qmc_read16(&bd->cbd_sc);
		qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_RX_UB | QMC_BD_RX_E));

		xfer_desc = &chan->rx_desc[bd - chan->rxbds];
		xfer_desc->rx_complete = NULL;
		xfer_desc->context = NULL;

		bd++;
	} while (!(ctrl & QMC_BD_RX_W));

	chan->rxbd_free = chan->rxbds;
	chan->rxbd_done = chan->rxbds;
	qmc_write16(chan->s_param + QMC_SPE_RBPTR,
		    qmc_read16(chan->s_param + QMC_SPE_RBASE));

	chan->rx_pending = 0;

	spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static void qmc_chan_reset_tx(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;

	spin_lock_irqsave(&chan->tx_lock, flags);

	/* Disable transmitter. It will be re-enabled by qmc_chan_start() */
	qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);

	bd = chan->txbds;
	do {
		ctrl = qmc_read16(&bd->cbd_sc);
		qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_TX_UB | QMC_BD_TX_R));

		xfer_desc = &chan->tx_desc[bd - chan->txbds];
		xfer_desc->tx_complete = NULL;
		xfer_desc->context = NULL;

		bd++;
	} while (!(ctrl & QMC_BD_TX_W));

	chan->txbd_free = chan->txbds;
	chan->txbd_done = chan->txbds;
	qmc_write16(chan->s_param + QMC_SPE_TBPTR,
		    qmc_read16(chan->s_param + QMC_SPE_TBASE));

	/* Reset TSTATE and ZISTATE to their initial value */
	qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
	qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);

	spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_reset(struct qmc_chan *chan, int direction)
{
	if (direction & QMC_CHAN_READ)
		qmc_chan_reset_rx(chan);

	if (direction & QMC_CHAN_WRITE)
		qmc_chan_reset_tx(chan);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_reset);
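
/*
 * Usage sketch (illustrative only): a typical way to flush a channel is
 * stop, reset, then start. The reset drops every queued buffer
 * descriptor (completion callbacks are cleared, not called) and the Tx
 * reset disables the transmitter until qmc_chan_start() re-enables it.
 *
 *	qmc_chan_stop(chan, QMC_CHAN_ALL);
 *	qmc_chan_reset(chan, QMC_CHAN_ALL);
 *	// re-submit Rx buffers here: pending descriptors were dropped
 *	qmc_chan_start(chan, QMC_CHAN_ALL);
 */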

static int qmc_check_chans(struct qmc *qmc)
{
	struct tsa_serial_info info;
	bool is_one_table = false;
	struct qmc_chan *chan;
	u64 tx_ts_mask = 0;
	u64 rx_ts_mask = 0;
	u64 tx_ts_assigned_mask;
	u64 rx_ts_assigned_mask;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(qmc->tsa_serial, &info);
	if (ret)
		return ret;

	if ((info.nb_tx_ts > 64) || (info.nb_rx_ts > 64)) {
		dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned not supported\n");
		return -EINVAL;
	}

	/*
	 * If more than 32 TS are assigned to this serial, one common table is
	 * used for Tx and Rx and so masks must be equal for all channels.
	 */
	if ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) {
		if (info.nb_tx_ts != info.nb_rx_ts) {
			dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
			return -EINVAL;
		}
		is_one_table = true;
	}

	tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1;
	rx_ts_assigned_mask = info.nb_rx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_rx_ts) - 1;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		if (chan->tx_ts_mask > tx_ts_assigned_mask) {
			dev_err(qmc->dev, "chan %u uses TSA unassigned Tx TS\n", chan->id);
			return -EINVAL;
		}
		if (tx_ts_mask & chan->tx_ts_mask) {
			dev_err(qmc->dev, "chan %u uses an already used Tx TS\n", chan->id);
			return -EINVAL;
		}

		if (chan->rx_ts_mask > rx_ts_assigned_mask) {
			dev_err(qmc->dev, "chan %u uses TSA unassigned Rx TS\n", chan->id);
			return -EINVAL;
		}
		if (rx_ts_mask & chan->rx_ts_mask) {
			dev_err(qmc->dev, "chan %u uses an already used Rx TS\n", chan->id);
			return -EINVAL;
		}

		if (is_one_table && (chan->tx_ts_mask != chan->rx_ts_mask)) {
			dev_err(qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
			return -EINVAL;
		}

		tx_ts_mask |= chan->tx_ts_mask;
		rx_ts_mask |= chan->rx_ts_mask;
	}

	return 0;
}
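
/*
 * Worked example for the checks above: with info.nb_tx_ts = 4 the
 * assigned mask is 0xf, so a channel with tx_ts_mask = 0x30 (TS 4 and 5)
 * is rejected as using unassigned TS, and two channels that both claim
 * TS 1 (mask 0x2) collide on the already-used check.
 */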

static unsigned int qmc_nb_chans(struct qmc *qmc)
{
	unsigned int count = 0;
	struct qmc_chan *chan;

	list_for_each_entry(chan, &qmc->chan_head, list)
		count++;

	return count;
}

static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
{
	struct device_node *chan_np;
	struct qmc_chan *chan;
	const char *mode;
	u32 chan_id;
	u64 ts_mask;
	int ret;

	for_each_available_child_of_node(np, chan_np) {
		ret = of_property_read_u32(chan_np, "reg", &chan_id);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read reg\n", chan_np);
			of_node_put(chan_np);
			return ret;
		}
		if (chan_id > 63) {
			dev_err(qmc->dev, "%pOF: Invalid chan_id\n", chan_np);
			of_node_put(chan_np);
			return -EINVAL;
		}

		chan = devm_kzalloc(qmc->dev, sizeof(*chan), GFP_KERNEL);
		if (!chan) {
			of_node_put(chan_np);
			return -ENOMEM;
		}

		chan->id = chan_id;
		spin_lock_init(&chan->rx_lock);
		spin_lock_init(&chan->tx_lock);

		ret = of_property_read_u64(chan_np, "fsl,tx-ts-mask", &ts_mask);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,tx-ts-mask\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		chan->tx_ts_mask = ts_mask;

		ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,rx-ts-mask\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		chan->rx_ts_mask = ts_mask;

		mode = "transparent";
		ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode);
		if (ret && ret != -EINVAL) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,operational-mode\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		if (!strcmp(mode, "transparent")) {
			chan->mode = QMC_TRANSPARENT;
		} else if (!strcmp(mode, "hdlc")) {
			chan->mode = QMC_HDLC;
		} else {
			dev_err(qmc->dev, "%pOF: Invalid fsl,operational-mode (%s)\n",
				chan_np, mode);
			of_node_put(chan_np);
			return -EINVAL;
		}

		chan->is_reverse_data = of_property_read_bool(chan_np,
							      "fsl,reverse-data");

		list_add_tail(&chan->list, &qmc->chan_head);
		qmc->chans[chan->id] = chan;
	}

	return qmc_check_chans(qmc);
}
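
/*
 * Device tree sketch (illustrative only): a channel child node with the
 * properties parsed above. The values are arbitrary examples; only the
 * property names and constraints come from this driver.
 *
 *	channel@10 {
 *		reg = <16>;
 *		fsl,operational-mode = "hdlc";
 *		fsl,tx-ts-mask = <0x00000000 0x0000000f>;
 *		fsl,rx-ts-mask = <0x00000000 0x0000000f>;
 *	};
 */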

static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
{
	struct qmc_chan *chan;
	unsigned int i;
	u16 val;

	/*
	 * Use a common Tx/Rx 64-entry table.
	 * Everything was previously checked: the Tx and Rx parameters are
	 * identical, so use the Rx parameters to build the table.
	 */

	/* Invalidate all entries */
	for (i = 0; i < 64; i++)
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);

	/* Set entries based on the Rx parameters */
	list_for_each_entry(chan, &qmc->chan_head, list) {
		for (i = 0; i < info->nb_rx_ts; i++) {
			if (!(chan->rx_ts_mask & (((u64)1) << i)))
				continue;

			val = QMC_TSA_VALID | QMC_TSA_MASK |
			      QMC_TSA_CHANNEL(chan->id);
			qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
		}
	}

	/* Set Wrap bit on last entry */
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
		      QMC_TSA_WRAP);

	/* Init pointers to the table */
	val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
	qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);

	return 0;
}

static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
{
	struct qmc_chan *chan;
	unsigned int i;
	u16 val;

	/*
	 * Use a 32-entry Tx table and a 32-entry Rx table.
	 * Everything was previously checked.
	 */

	/* Invalidate all entries */
	for (i = 0; i < 32; i++) {
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000);
	}

	/* Set entries based on the Rx and Tx parameters */
	list_for_each_entry(chan, &qmc->chan_head, list) {
		/* Rx part */
		for (i = 0; i < info->nb_rx_ts; i++) {
			if (!(chan->rx_ts_mask & (((u64)1) << i)))
				continue;

			val = QMC_TSA_VALID | QMC_TSA_MASK |
			      QMC_TSA_CHANNEL(chan->id);
			qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
		}
		/* Tx part */
		for (i = 0; i < info->nb_tx_ts; i++) {
			if (!(chan->tx_ts_mask & (((u64)1) << i)))
				continue;

			val = QMC_TSA_VALID | QMC_TSA_MASK |
			      QMC_TSA_CHANNEL(chan->id);
			qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), val);
		}
	}

	/* Set Wrap bit on last entries */
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
		      QMC_TSA_WRAP);
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATTX + ((info->nb_tx_ts - 1) * 2),
		      QMC_TSA_WRAP);

	/* Init Rx pointers ... */
	val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
	qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);

	/* ... and Tx pointers */
	val = qmc->scc_pram_offset + QMC_GBL_TSATTX;
	qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);

	return 0;
}

static int qmc_setup_tsa(struct qmc *qmc)
{
	struct tsa_serial_info info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/*
	 * Set up one common 64-entry table or two 32-entry tables (one for Tx
	 * and one for Rx) according to the number of assigned TS.
	 */
	return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
		qmc_setup_tsa_64rxtx(qmc, &info) :
		qmc_setup_tsa_32rx_32tx(qmc, &info);
}

static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
{
	struct tsa_serial_info info;
	u16 first_rx, last_tx;
	u16 trnsync;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/* Find the first Rx TS allocated to the channel */
	first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;

	/* Find the last Tx TS allocated to the channel */
	last_tx = fls64(chan->tx_ts_mask);

	trnsync = 0;
	if (info.nb_rx_ts)
		trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
	if (info.nb_tx_ts)
		trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);

	qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);

	dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
		chan->id, trnsync,
		first_rx, info.nb_rx_ts, chan->rx_ts_mask,
		last_tx, info.nb_tx_ts, chan->tx_ts_mask);

	return 0;
}
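
/*
 * Worked example for the TRNSYNC computation above: with
 * rx_ts_mask = 0x0c, the first Rx TS is __ffs64(0x0c) + 1 = 3; with
 * info.nb_rx_ts = 4 this gives QMC_SPE_TRNSYNC_RX((3 % 4) * 2) = 0x0600.
 * The Tx half is computed the same way from the last Tx TS.
 */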

static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
{
	unsigned int i;
	cbd_t __iomem *bd;
	int ret;
	u16 val;

	chan->qmc = qmc;

	/* Set channel specific parameter base address */
	chan->s_param = qmc->dpram + (chan->id * 64);
	/* 16 BDs per channel (8 Rx and 8 Tx) */
	chan->txbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS));
	chan->rxbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS;

	chan->txbd_free = chan->txbds;
	chan->txbd_done = chan->txbds;
	chan->rxbd_free = chan->rxbds;
	chan->rxbd_done = chan->rxbds;

	/* TBASE and TBPTR */
	val = chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS) * sizeof(cbd_t);
	qmc_write16(chan->s_param + QMC_SPE_TBASE, val);
	qmc_write16(chan->s_param + QMC_SPE_TBPTR, val);

	/* RBASE and RBPTR */
	val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t);
	qmc_write16(chan->s_param + QMC_SPE_RBASE, val);
	qmc_write16(chan->s_param + QMC_SPE_RBPTR, val);
	qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
	qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
	qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
	if (chan->mode == QMC_TRANSPARENT) {
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
		qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60);
		val = QMC_SPE_CHAMR_MODE_TRANSP | QMC_SPE_CHAMR_TRANSP_SYNC;
		if (chan->is_reverse_data)
			val |= QMC_SPE_CHAMR_TRANSP_RD;
		qmc_write16(chan->s_param + QMC_SPE_CHAMR, val);
		ret = qmc_setup_chan_trnsync(qmc, chan);
		if (ret)
			return ret;
	} else {
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
		qmc_write16(chan->s_param + QMC_SPE_MFLR, 60);
		qmc_write16(chan->s_param + QMC_SPE_CHAMR,
			QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
	}

	/* Do not enable interrupts now. They will be enabled later */
	qmc_write16(chan->s_param + QMC_SPE_INTMSK, 0x0000);

	/* Init Rx BDs and set Wrap bit on last descriptor */
	BUILD_BUG_ON(QMC_NB_RXBDS == 0);
	val = QMC_BD_RX_I;
	for (i = 0; i < QMC_NB_RXBDS; i++) {
		bd = chan->rxbds + i;
		qmc_write16(&bd->cbd_sc, val);
	}
	bd = chan->rxbds + QMC_NB_RXBDS - 1;
	qmc_write16(&bd->cbd_sc, val | QMC_BD_RX_W);

	/* Init Tx BDs and set Wrap bit on last descriptor */
	BUILD_BUG_ON(QMC_NB_TXBDS == 0);
	val = QMC_BD_TX_I;
	if (chan->mode == QMC_HDLC)
		val |= QMC_BD_TX_L | QMC_BD_TX_TC;
	for (i = 0; i < QMC_NB_TXBDS; i++) {
		bd = chan->txbds + i;
		qmc_write16(&bd->cbd_sc, val);
	}
	bd = chan->txbds + QMC_NB_TXBDS - 1;
	qmc_write16(&bd->cbd_sc, val | QMC_BD_TX_W);

	return 0;
}

static int qmc_setup_chans(struct qmc *qmc)
{
	struct qmc_chan *chan;
	int ret;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		ret = qmc_setup_chan(qmc, chan);
		if (ret)
			return ret;
	}

	return 0;
}

static int qmc_finalize_chans(struct qmc *qmc)
{
	struct qmc_chan *chan;
	int ret;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		/* Unmask channel interrupts */
		if (chan->mode == QMC_HDLC) {
			qmc_write16(chan->s_param + QMC_SPE_INTMSK,
				    QMC_INT_NID | QMC_INT_IDL | QMC_INT_MRF |
				    QMC_INT_UN | QMC_INT_RXF | QMC_INT_BSY |
				    QMC_INT_TXB | QMC_INT_RXB);
		} else {
			qmc_write16(chan->s_param + QMC_SPE_INTMSK,
				    QMC_INT_UN | QMC_INT_BSY |
				    QMC_INT_TXB | QMC_INT_RXB);
		}

		/* Force-stop the channel */
		ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
		if (ret)
			return ret;
	}

	return 0;
}

static int qmc_setup_ints(struct qmc *qmc)
{
	unsigned int i;
	u16 __iomem *last;

	/* Zero out all entries */
	for (i = 0; i < (qmc->int_size / sizeof(u16)); i++)
		qmc_write16(qmc->int_table + i, 0x0000);

	/* Set Wrap bit on last entry */
	if (qmc->int_size >= sizeof(u16)) {
		last = qmc->int_table + (qmc->int_size / sizeof(u16)) - 1;
		qmc_write16(last, QMC_INT_W);
	}

	return 0;
}

static void qmc_irq_gint(struct qmc *qmc)
{
	struct qmc_chan *chan;
	unsigned int chan_id;
	unsigned long flags;
	u16 int_entry;

	int_entry = qmc_read16(qmc->int_curr);
	while (int_entry & QMC_INT_V) {
		/* Clear all but the Wrap bit */
		qmc_write16(qmc->int_curr, int_entry & QMC_INT_W);

		chan_id = QMC_INT_GET_CHANNEL(int_entry);
		chan = qmc->chans[chan_id];
		if (!chan) {
			dev_err(qmc->dev, "interrupt on invalid chan %u\n", chan_id);
			goto int_next;
		}

		if (int_entry & QMC_INT_TXB)
			qmc_chan_write_done(chan);

		if (int_entry & QMC_INT_UN) {
			dev_info(qmc->dev, "intr chan %u, 0x%04x (UN)\n", chan_id,
				 int_entry);
			chan->nb_tx_underrun++;
		}

		if (int_entry & QMC_INT_BSY) {
			dev_info(qmc->dev, "intr chan %u, 0x%04x (BSY)\n", chan_id,
				 int_entry);
			chan->nb_rx_busy++;
			/* Restart the receiver if needed */
			spin_lock_irqsave(&chan->rx_lock, flags);
			if (chan->rx_pending && !chan->is_rx_stopped) {
				if (chan->mode == QMC_TRANSPARENT)
					qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
				else
					qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
				qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
				chan->is_rx_halted = false;
			} else {
				chan->is_rx_halted = true;
			}
			spin_unlock_irqrestore(&chan->rx_lock, flags);
		}

		if (int_entry & QMC_INT_RXB)
			qmc_chan_read_done(chan);

int_next:
		if (int_entry & QMC_INT_W)
			qmc->int_curr = qmc->int_table;
		else
			qmc->int_curr++;
		int_entry = qmc_read16(qmc->int_curr);
	}
}

static irqreturn_t qmc_irq_handler(int irq, void *priv)
{
	struct qmc *qmc = (struct qmc *)priv;
	u16 scce;

	scce = qmc_read16(qmc->scc_regs + SCC_SCCE);
	qmc_write16(qmc->scc_regs + SCC_SCCE, scce);

	if (unlikely(scce & SCC_SCCE_IQOV))
		dev_info(qmc->dev, "IRQ queue overflow\n");

	if (unlikely(scce & SCC_SCCE_GUN))
		dev_err(qmc->dev, "Global transmitter underrun\n");

	if (unlikely(scce & SCC_SCCE_GOV))
		dev_err(qmc->dev, "Global receiver overrun\n");

	/* normal interrupt */
	if (likely(scce & SCC_SCCE_GINT))
		qmc_irq_gint(qmc);

	return IRQ_HANDLED;
}

static int qmc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	unsigned int nb_chans;
	struct resource *res;
	struct qmc *qmc;
	int irq;
	int ret;

	qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
	if (!qmc)
		return -ENOMEM;

	qmc->dev = &pdev->dev;
	INIT_LIST_HEAD(&qmc->chan_head);

	qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "scc_regs");
	if (IS_ERR(qmc->scc_regs))
		return PTR_ERR(qmc->scc_regs);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scc_pram");
	if (!res)
		return -EINVAL;
	qmc->scc_pram_offset = res->start - get_immrbase();
	qmc->scc_pram = devm_ioremap_resource(qmc->dev, res);
	if (IS_ERR(qmc->scc_pram))
		return PTR_ERR(qmc->scc_pram);

	qmc->dpram = devm_platform_ioremap_resource_byname(pdev, "dpram");
	if (IS_ERR(qmc->dpram))
		return PTR_ERR(qmc->dpram);

	qmc->tsa_serial = devm_tsa_serial_get_byphandle(qmc->dev, np, "fsl,tsa-serial");
	if (IS_ERR(qmc->tsa_serial)) {
		return dev_err_probe(qmc->dev, PTR_ERR(qmc->tsa_serial),
				     "Failed to get TSA serial\n");
	}

	/* Connect the serial (SCC) to TSA */
	ret = tsa_serial_connect(qmc->tsa_serial);
	if (ret) {
		dev_err(qmc->dev, "Failed to connect TSA serial\n");
		return ret;
	}

	/* Parse channel information */
	ret = qmc_of_parse_chans(qmc, np);
	if (ret)
		goto err_tsa_serial_disconnect;

	nb_chans = qmc_nb_chans(qmc);

	/* Init GSMR_H and GSMR_L registers */
	qmc_write32(qmc->scc_regs + SCC_GSMRH,
		    SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP);

	/* Enable QMC mode */
	qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_MODE_QMC);

	/*
	 * Allocate the buffer descriptor table
	 * 8 Rx and 8 Tx descriptors per channel
	 */
	qmc->bd_size = (nb_chans * (QMC_NB_TXBDS + QMC_NB_RXBDS)) * sizeof(cbd_t);
	qmc->bd_table = dmam_alloc_coherent(qmc->dev, qmc->bd_size,
		&qmc->bd_dma_addr, GFP_KERNEL);
	if (!qmc->bd_table) {
		dev_err(qmc->dev, "Failed to allocate bd table\n");
		ret = -ENOMEM;
		goto err_tsa_serial_disconnect;
	}
	memset(qmc->bd_table, 0, qmc->bd_size);

	qmc_write32(qmc->scc_pram + QMC_GBL_MCBASE, qmc->bd_dma_addr);

	/* Allocate the interrupt table */
	qmc->int_size = QMC_NB_INTS * sizeof(u16);
	qmc->int_table = dmam_alloc_coherent(qmc->dev, qmc->int_size,
		&qmc->int_dma_addr, GFP_KERNEL);
	if (!qmc->int_table) {
		dev_err(qmc->dev, "Failed to allocate interrupt table\n");
		ret = -ENOMEM;
		goto err_tsa_serial_disconnect;
	}
	memset(qmc->int_table, 0, qmc->int_size);

	qmc->int_curr = qmc->int_table;
	qmc_write32(qmc->scc_pram + QMC_GBL_INTBASE, qmc->int_dma_addr);
	qmc_write32(qmc->scc_pram + QMC_GBL_INTPTR, qmc->int_dma_addr);

	/* Set MRBLR (valid for HDLC only) to max MRU + max CRC */
	qmc_write16(qmc->scc_pram + QMC_GBL_MRBLR, HDLC_MAX_MRU + 4);

	qmc_write16(qmc->scc_pram + QMC_GBL_GRFTHR, 1);
	qmc_write16(qmc->scc_pram + QMC_GBL_GRFCNT, 1);

	qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
	qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);

	ret = qmc_setup_tsa(qmc);
	if (ret)
		goto err_tsa_serial_disconnect;

	qmc_write16(qmc->scc_pram + QMC_GBL_QMCSTATE, 0x8000);

	ret = qmc_setup_chans(qmc);
	if (ret)
		goto err_tsa_serial_disconnect;

	/* Init interrupts table */
	ret = qmc_setup_ints(qmc);
	if (ret)
		goto err_tsa_serial_disconnect;

	/* Disable and clear interrupts, set the irq handler */
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
	qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_tsa_serial_disconnect;
	}
	ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc);
	if (ret < 0)
		goto err_tsa_serial_disconnect;

	/* Enable interrupts */
	qmc_write16(qmc->scc_regs + SCC_SCCM,
		SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV);

	ret = qmc_finalize_chans(qmc);
	if (ret < 0)
		goto err_disable_intr;

	/* Enable transmitter and receiver */
	qmc_setbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	platform_set_drvdata(pdev, qmc);

	return 0;

err_disable_intr:
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0);

err_tsa_serial_disconnect:
	tsa_serial_disconnect(qmc->tsa_serial);
	return ret;
}

static int qmc_remove(struct platform_device *pdev)
{
	struct qmc *qmc = platform_get_drvdata(pdev);

	/* Disable transmitter and receiver */
	qmc_clrbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* Disable interrupts */
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0);

	/* Disconnect the serial from TSA */
	tsa_serial_disconnect(qmc->tsa_serial);

	return 0;
}

static const struct of_device_id qmc_id_table[] = {
	{ .compatible = "fsl,cpm1-scc-qmc" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, qmc_id_table);

static struct platform_driver qmc_driver = {
	.driver = {
		.name = "fsl-qmc",
		.of_match_table = of_match_ptr(qmc_id_table),
	},
	.probe = qmc_probe,
	.remove = qmc_remove,
};
module_platform_driver(qmc_driver);

struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name)
{
	struct of_phandle_args out_args;
	struct platform_device *pdev;
	struct qmc_chan *qmc_chan;
	struct qmc *qmc;
	int ret;

	ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0,
					       &out_args);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!of_match_node(qmc_driver.driver.of_match_table, out_args.np)) {
		of_node_put(out_args.np);
		return ERR_PTR(-EINVAL);
	}

	pdev = of_find_device_by_node(out_args.np);
	of_node_put(out_args.np);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	qmc = platform_get_drvdata(pdev);
	if (!qmc) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (out_args.args_count != 1) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	if (out_args.args[0] >= ARRAY_SIZE(qmc->chans)) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	qmc_chan = qmc->chans[out_args.args[0]];
	if (!qmc_chan) {
		platform_device_put(pdev);
		return ERR_PTR(-ENOENT);
	}

	return qmc_chan;
}
EXPORT_SYMBOL(qmc_chan_get_byphandle);
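
/*
 * Device tree sketch (illustrative only): the phandle parsed above takes
 * exactly one argument, the channel number. The consumer node and the
 * "fsl,qmc-chan" property name are hypothetical examples.
 *
 *	consumer {
 *		fsl,qmc-chan = <&qmc 16>;
 *	};
 */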

void qmc_chan_put(struct qmc_chan *chan)
{
	put_device(chan->qmc->dev);
}
EXPORT_SYMBOL(qmc_chan_put);

static void devm_qmc_chan_release(struct device *dev, void *res)
{
	struct qmc_chan **qmc_chan = res;

	qmc_chan_put(*qmc_chan);
}

struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev,
					     struct device_node *np,
					     const char *phandle_name)
{
	struct qmc_chan *qmc_chan;
	struct qmc_chan **dr;

	dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return ERR_PTR(-ENOMEM);

	qmc_chan = qmc_chan_get_byphandle(np, phandle_name);
	if (!IS_ERR(qmc_chan)) {
		*dr = qmc_chan;
		devres_add(dev, dr);
	} else {
		devres_free(dr);
	}

	return qmc_chan;
}
EXPORT_SYMBOL(devm_qmc_chan_get_byphandle);
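
/*
 * Usage sketch (illustrative only): a consumer driver can grab its
 * channel in probe() and let devres drop the reference automatically.
 * 'my_probe' and the property name are hypothetical.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct qmc_chan *chan;
 *
 *		chan = devm_qmc_chan_get_byphandle(&pdev->dev,
 *						   pdev->dev.of_node,
 *						   "fsl,qmc-chan");
 *		if (IS_ERR(chan))
 *			return PTR_ERR(chan);
 *		return 0;
 *	}
 */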

MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("CPM QMC driver");
MODULE_LICENSE("GPL");