1// SPDX-License-Identifier: GPL-2.0
2/*
3 * SuperH on-chip serial module support.  (SCI with no FIFO / with FIFO)
4 *
5 *  Copyright (C) 2002 - 2011  Paul Mundt
6 *  Copyright (C) 2015 Glider bvba
7 *  Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
8 *
9 * based off of the old drivers/char/sh-sci.c by:
10 *
11 *   Copyright (C) 1999, 2000  Niibe Yutaka
12 *   Copyright (C) 2000  Sugioka Toshinobu
13 *   Modified to support multiple serial ports. Stuart Menefy (May 2000).
14 *   Modified to support SecureEdge. David McCullough (2002)
15 *   Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
16 *   Removed SH7300 support (Jul 2007).
17 */
18#undef DEBUG
19
20#include <linux/clk.h>
21#include <linux/console.h>
22#include <linux/ctype.h>
23#include <linux/cpufreq.h>
24#include <linux/delay.h>
25#include <linux/dmaengine.h>
26#include <linux/dma-mapping.h>
27#include <linux/err.h>
28#include <linux/errno.h>
29#include <linux/init.h>
30#include <linux/interrupt.h>
31#include <linux/ioport.h>
32#include <linux/ktime.h>
33#include <linux/major.h>
34#include <linux/minmax.h>
35#include <linux/module.h>
36#include <linux/mm.h>
37#include <linux/of.h>
38#include <linux/of_device.h>
39#include <linux/platform_device.h>
40#include <linux/pm_runtime.h>
41#include <linux/scatterlist.h>
42#include <linux/serial.h>
43#include <linux/serial_sci.h>
44#include <linux/sh_dma.h>
45#include <linux/slab.h>
46#include <linux/string.h>
47#include <linux/sysrq.h>
48#include <linux/timer.h>
49#include <linux/tty.h>
50#include <linux/tty_flip.h>
51
52#ifdef CONFIG_SUPERH
53#include <asm/sh_bios.h>
54#include <asm/platform_early.h>
55#endif
56
57#include "serial_mctrl_gpio.h"
58#include "sh-sci.h"
59
60/* Offsets into the sci_port->irqs array */
/* Offsets into the sci_port->irqs array */
enum {
	SCIx_ERI_IRQ,	/* receive error */
	SCIx_RXI_IRQ,	/* receive data full */
	SCIx_TXI_IRQ,	/* transmit data empty */
	SCIx_BRI_IRQ,	/* break */
	SCIx_DRI_IRQ,	/* data ready (split-interrupt variants, see RZ note) */
	SCIx_TEI_IRQ,	/* transmit end (split-interrupt variants) */
	SCIx_NR_IRQS,

	SCIx_MUX_IRQ = SCIx_NR_IRQS,	/* special case */
};
72
/*
 * True if this port's interrupt sources share one (muxed) IRQ line:
 * either ERI and RXI carry the same number, or ERI is present while
 * RXI is absent (negative).
 *
 * The whole expansion is parenthesized; the previous form leaked a
 * top-level || that could rebind against operators at the use site.
 */
#define SCIx_IRQ_IS_MUXED(port)			\
	(((port)->irqs[SCIx_ERI_IRQ] ==		\
	  (port)->irqs[SCIx_RXI_IRQ]) ||	\
	 ((port)->irqs[SCIx_ERI_IRQ] &&		\
	  ((port)->irqs[SCIx_RXI_IRQ] < 0)))
78
/* Indices into sci_port->clks[] and sci_port->clk_rates[] */
enum SCI_CLKS {
	SCI_FCK,		/* Functional Clock */
	SCI_SCK,		/* Optional External Clock */
	SCI_BRG_INT,		/* Optional BRG Internal Clock Source */
	SCI_SCIF_CLK,		/* Optional BRG External Clock Source */
	SCI_NUM_CLKS
};
86
/* Bit x set means sampling rate x + 1 is supported */
#define SCI_SR(x)		BIT((x) - 1)
#define SCI_SR_RANGE(x, y)	GENMASK((y) - 1, (x) - 1)

/*
 * Sampling rates supported by SCIFA/SCIFB ports.  The expansion is
 * parenthesized as a whole so the OR-chain cannot rebind against
 * higher-precedence operators at the use site.
 */
#define SCI_SR_SCIFAB		(SCI_SR(5) | SCI_SR(7) | SCI_SR(11) | \
				 SCI_SR(13) | SCI_SR(16) | SCI_SR(17) | \
				 SCI_SR(19) | SCI_SR(27))

/* Lowest / highest sampling rate supported by a port */
#define min_sr(_port)		ffs((_port)->sampling_rate_mask)
#define max_sr(_port)		fls((_port)->sampling_rate_mask)
97
/*
 * Iterate over all supported sampling rates, from high to low.
 * Only rates actually present in the port's sampling_rate_mask are
 * visited; _sr takes the rate value itself (bit index + 1).
 */
#define for_each_sr(_sr, _port)						\
	for ((_sr) = max_sr(_port); (_sr) >= min_sr(_port); (_sr)--)	\
		if ((_port)->sampling_rate_mask & SCI_SR((_sr)))
102
/* One register in a port's map: byte offset and access width in bits */
struct plat_sci_reg {
	u8 offset, size;
};
106
/* Constants describing one SCI(F) hardware variant (regtype) */
struct sci_port_params {
	const struct plat_sci_reg regs[SCIx_NR_REGS];	/* register map */
	unsigned int fifosize;		/* FIFO depth in characters */
	unsigned int overrun_reg;	/* register enum holding overrun flag */
	unsigned int overrun_mask;	/* overrun bit within overrun_reg */
	unsigned int sampling_rate_mask;	/* see SCI_SR() */
	unsigned int error_mask;	/* status bits treated as RX errors */
	unsigned int error_clear;	/* value written to clear error bits */
};
116
/* Per-port driver state, embedding the generic uart_port */
struct sci_port {
	struct uart_port	port;

	/* Platform configuration */
	const struct sci_port_params *params;	/* regtype constants */
	const struct plat_sci_port *cfg;	/* board/platform data */
	unsigned int		sampling_rate_mask;
	resource_size_t		reg_size;
	struct mctrl_gpios	*gpios;

	/* Clocks */
	struct clk		*clks[SCI_NUM_CLKS];
	unsigned long		clk_rates[SCI_NUM_CLKS];

	int			irqs[SCIx_NR_IRQS];	/* indexed by SCIx_*_IRQ */
	char			*irqstr[SCIx_NR_IRQS];	/* allocated irq names */

	struct dma_chan			*chan_tx;
	struct dma_chan			*chan_rx;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	struct dma_chan			*chan_tx_saved;
	struct dma_chan			*chan_rx_saved;
	dma_cookie_t			cookie_tx;
	dma_cookie_t			cookie_rx[2];
	dma_cookie_t			active_rx;
	dma_addr_t			tx_dma_addr;
	unsigned int			tx_dma_len;
	struct scatterlist		sg_rx[2];
	void				*rx_buf[2];
	size_t				buf_len_rx;
	struct work_struct		work_tx;
	struct hrtimer			rx_timer;
	unsigned int			rx_timeout;	/* microseconds */
#endif
	unsigned int			rx_frame;
	int				rx_trigger;	/* RX FIFO trigger level */
	struct timer_list		rx_fifo_timer;
	int				rx_fifo_timeout;
	u16				hscif_tot;	/* HSCIF TOT field, pre-shifted */

	bool has_rtscts;	/* RTS/CTS lines wired up */
	bool autorts;		/* automatic RTS control active */
};
161
#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS

/* Driver-wide port table and bookkeeping */
static struct sci_port sci_ports[SCI_NPORTS];
/* NOTE(review): presumably a bitmap of claimed sci_ports slots — usage
 * is outside this chunk; confirm against probe/remove. */
static unsigned long sci_ports_in_use;
static struct uart_driver sci_uart_driver;
167
/* Map a generic uart_port back to its enclosing sci_port. */
static inline struct sci_port *
to_sci_port(struct uart_port *uart)
{
	return container_of(uart, struct sci_port, port);
}
173
/*
 * Per-regtype description table: register layout, FIFO depth, overrun
 * flag location, supported sampling rates and error masks for each
 * SCI(F) hardware variant.  Indexed by the SCIx_*_REGTYPE enums.
 */
static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
	/*
	 * Common SCI definitions, dependent on the port's regshift
	 * value.
	 */
	[SCIx_SCI_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00,  8 },
			[SCBRR]		= { 0x01,  8 },
			[SCSCR]		= { 0x02,  8 },
			[SCxTDR]	= { 0x03,  8 },
			[SCxSR]		= { 0x04,  8 },
			[SCxRDR]	= { 0x05,  8 },
		},
		.fifosize = 1,
		.overrun_reg = SCxSR,
		.overrun_mask = SCI_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCI_DEFAULT_ERROR_MASK | SCI_ORER,
		.error_clear = SCI_ERROR_CLEAR & ~SCI_ORER,
	},

	/*
	 * Common definitions for legacy IrDA ports.
	 */
	[SCIx_IRDA_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00,  8 },
			[SCBRR]		= { 0x02,  8 },
			[SCSCR]		= { 0x04,  8 },
			[SCxTDR]	= { 0x06,  8 },
			[SCxSR]		= { 0x08, 16 },
			[SCxRDR]	= { 0x0a,  8 },
			[SCFCR]		= { 0x0c,  8 },
			[SCFDR]		= { 0x0e, 16 },
		},
		.fifosize = 1,
		.overrun_reg = SCxSR,
		.overrun_mask = SCI_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCI_DEFAULT_ERROR_MASK | SCI_ORER,
		.error_clear = SCI_ERROR_CLEAR & ~SCI_ORER,
	},

	/*
	 * Common SCIFA definitions.
	 */
	[SCIx_SCIFA_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04,  8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x20,  8 },
			[SCxSR]		= { 0x14, 16 },
			[SCxRDR]	= { 0x24,  8 },
			[SCFCR]		= { 0x18, 16 },
			[SCFDR]		= { 0x1c, 16 },
			[SCPCR]		= { 0x30, 16 },
			[SCPDR]		= { 0x34, 16 },
		},
		.fifosize = 64,
		.overrun_reg = SCxSR,
		.overrun_mask = SCIFA_ORER,
		.sampling_rate_mask = SCI_SR_SCIFAB,
		.error_mask = SCIF_DEFAULT_ERROR_MASK | SCIFA_ORER,
		.error_clear = SCIF_ERROR_CLEAR & ~SCIFA_ORER,
	},

	/*
	 * Common SCIFB definitions.
	 */
	[SCIx_SCIFB_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04,  8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x40,  8 },
			[SCxSR]		= { 0x14, 16 },
			[SCxRDR]	= { 0x60,  8 },
			[SCFCR]		= { 0x18, 16 },
			[SCTFDR]	= { 0x38, 16 },
			[SCRFDR]	= { 0x3c, 16 },
			[SCPCR]		= { 0x30, 16 },
			[SCPDR]		= { 0x34, 16 },
		},
		.fifosize = 256,
		.overrun_reg = SCxSR,
		.overrun_mask = SCIFA_ORER,
		.sampling_rate_mask = SCI_SR_SCIFAB,
		.error_mask = SCIF_DEFAULT_ERROR_MASK | SCIFA_ORER,
		.error_clear = SCIF_ERROR_CLEAR & ~SCIFA_ORER,
	},

	/*
	 * Common SH-2(A) SCIF definitions for ports with FIFO data
	 * count registers.
	 */
	[SCIx_SH2_SCIF_FIFODATA_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04,  8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x0c,  8 },
			[SCxSR]		= { 0x10, 16 },
			[SCxRDR]	= { 0x14,  8 },
			[SCFCR]		= { 0x18, 16 },
			[SCFDR]		= { 0x1c, 16 },
			[SCSPTR]	= { 0x20, 16 },
			[SCLSR]		= { 0x24, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * The "SCIFA" that is in RZ/T and RZ/A2.
	 * It looks like a normal SCIF with FIFO data, but with a
	 * compressed address space. Also, the break out of interrupts
	 * are different: ERI/BRI, RXI, TXI, TEI, DRI.
	 */
	[SCIx_RZ_SCIFA_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x02,  8 },
			[SCSCR]		= { 0x04, 16 },
			[SCxTDR]	= { 0x06,  8 },
			[SCxSR]		= { 0x08, 16 },
			[SCxRDR]	= { 0x0A,  8 },
			[SCFCR]		= { 0x0C, 16 },
			[SCFDR]		= { 0x0E, 16 },
			[SCSPTR]	= { 0x10, 16 },
			[SCLSR]		= { 0x12, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common SH-3 SCIF definitions.
	 */
	[SCIx_SH3_SCIF_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00,  8 },
			[SCBRR]		= { 0x02,  8 },
			[SCSCR]		= { 0x04,  8 },
			[SCxTDR]	= { 0x06,  8 },
			[SCxSR]		= { 0x08, 16 },
			[SCxRDR]	= { 0x0a,  8 },
			[SCFCR]		= { 0x0c,  8 },
			[SCFDR]		= { 0x0e, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions.
	 */
	[SCIx_SH4_SCIF_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04,  8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x0c,  8 },
			[SCxSR]		= { 0x10, 16 },
			[SCxRDR]	= { 0x14,  8 },
			[SCFCR]		= { 0x18, 16 },
			[SCFDR]		= { 0x1c, 16 },
			[SCSPTR]	= { 0x20, 16 },
			[SCLSR]		= { 0x24, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common SCIF definitions for ports with a Baud Rate Generator for
	 * External Clock (BRG).
	 */
	[SCIx_SH4_SCIF_BRG_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04,  8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x0c,  8 },
			[SCxSR]		= { 0x10, 16 },
			[SCxRDR]	= { 0x14,  8 },
			[SCFCR]		= { 0x18, 16 },
			[SCFDR]		= { 0x1c, 16 },
			[SCSPTR]	= { 0x20, 16 },
			[SCLSR]		= { 0x24, 16 },
			[SCDL]		= { 0x30, 16 },
			[SCCKS]		= { 0x34, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common HSCIF definitions.
	 */
	[SCIx_HSCIF_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04,  8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x0c,  8 },
			[SCxSR]		= { 0x10, 16 },
			[SCxRDR]	= { 0x14,  8 },
			[SCFCR]		= { 0x18, 16 },
			[SCFDR]		= { 0x1c, 16 },
			[SCSPTR]	= { 0x20, 16 },
			[SCLSR]		= { 0x24, 16 },
			[HSSRR]		= { 0x40, 16 },
			[SCDL]		= { 0x30, 16 },
			[SCCKS]		= { 0x34, 16 },
			[HSRTRGR]	= { 0x54, 16 },
			[HSTTRGR]	= { 0x58, 16 },
		},
		.fifosize = 128,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR_RANGE(8, 32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR
	 * register.
	 */
	[SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04,  8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x0c,  8 },
			[SCxSR]		= { 0x10, 16 },
			[SCxRDR]	= { 0x14,  8 },
			[SCFCR]		= { 0x18, 16 },
			[SCFDR]		= { 0x1c, 16 },
			[SCLSR]		= { 0x24, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions for ports with FIFO data
	 * count registers.
	 */
	[SCIx_SH4_SCIF_FIFODATA_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04,  8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x0c,  8 },
			[SCxSR]		= { 0x10, 16 },
			[SCxRDR]	= { 0x14,  8 },
			[SCFCR]		= { 0x18, 16 },
			[SCFDR]		= { 0x1c, 16 },
			[SCTFDR]	= { 0x1c, 16 },	/* aliased to SCFDR */
			[SCRFDR]	= { 0x20, 16 },
			[SCSPTR]	= { 0x24, 16 },
			[SCLSR]		= { 0x28, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR
	 * registers.
	 */
	[SCIx_SH7705_SCIF_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04,  8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x20,  8 },
			[SCxSR]		= { 0x14, 16 },
			[SCxRDR]	= { 0x24,  8 },
			[SCFCR]		= { 0x18, 16 },
			[SCFDR]		= { 0x1c, 16 },
		},
		.fifosize = 64,
		.overrun_reg = SCxSR,
		.overrun_mask = SCIFA_ORER,
		.sampling_rate_mask = SCI_SR(16),
		.error_mask = SCIF_DEFAULT_ERROR_MASK | SCIFA_ORER,
		.error_clear = SCIF_ERROR_CLEAR & ~SCIFA_ORER,
	},
};
495
/* Look up the plat_sci_reg (offset/width) for a register enum on this port */
#define sci_getreg(up, offset)		(&to_sci_port(up)->params->regs[offset])
497
498/*
499 * The "offset" here is rather misleading, in that it refers to an enum
500 * value relative to the port mapping rather than the fixed offset
501 * itself, which needs to be manually retrieved from the platform's
502 * register map for the given port.
503 */
504static unsigned int sci_serial_in(struct uart_port *p, int offset)
505{
506	const struct plat_sci_reg *reg = sci_getreg(p, offset);
507
508	if (reg->size == 8)
509		return ioread8(p->membase + (reg->offset << p->regshift));
510	else if (reg->size == 16)
511		return ioread16(p->membase + (reg->offset << p->regshift));
512	else
513		WARN(1, "Invalid register access\n");
514
515	return 0;
516}
517
518static void sci_serial_out(struct uart_port *p, int offset, int value)
519{
520	const struct plat_sci_reg *reg = sci_getreg(p, offset);
521
522	if (reg->size == 8)
523		iowrite8(value, p->membase + (reg->offset << p->regshift));
524	else if (reg->size == 16)
525		iowrite16(value, p->membase + (reg->offset << p->regshift));
526	else
527		WARN(1, "Invalid register access\n");
528}
529
/*
 * Power the port up: take a runtime-PM reference, prepare/enable all
 * port clocks, cache their rates, and refresh uartclk from the
 * functional clock.  No-op when the port has no bound device.
 */
static void sci_port_enable(struct sci_port *sci_port)
{
	unsigned int i;

	if (!sci_port->port.dev)
		return;

	pm_runtime_get_sync(sci_port->port.dev);

	for (i = 0; i < SCI_NUM_CLKS; i++) {
		clk_prepare_enable(sci_port->clks[i]);
		sci_port->clk_rates[i] = clk_get_rate(sci_port->clks[i]);
	}
	sci_port->port.uartclk = sci_port->clk_rates[SCI_FCK];
}
545
/*
 * Counterpart of sci_port_enable(): disable the clocks in reverse
 * order, then drop the runtime-PM reference.
 */
static void sci_port_disable(struct sci_port *sci_port)
{
	unsigned int i;

	if (!sci_port->port.dev)
		return;

	for (i = SCI_NUM_CLKS; i-- > 0; )
		clk_disable_unprepare(sci_port->clks[i]);

	pm_runtime_put_sync(sci_port->port.dev);
}
558
/* SCSCR bits toggled when enabling/disabling reception on this port */
static inline unsigned long port_rx_irq_mask(struct uart_port *port)
{
	/*
	 * Not all ports (such as SCIFA) will support REIE. Rather than
	 * special-casing the port type, we check the port initialization
	 * IRQ enable mask to see whether the IRQ is desired at all. If
	 * it's unset, it's logically inferred that there's no point in
	 * testing for it.
	 */
	return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
}
570
/*
 * uart_ops .start_tx: begin or resume transmission.
 *
 * With DMA built in, SCIFA/SCIFB first synchronize the TDRQE (TX DMA
 * request enable) bit in SCSCR to whether a TX DMA channel exists, and
 * any pending-but-unsubmitted DMA transfer is kicked off through the
 * TX work item.  Finally TIE is set for interrupt-driven PIO; on
 * SCIFA/SCIFB this is done even with DMA active.
 */
static void sci_start_tx(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	unsigned short ctrl;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 new, scr = serial_port_in(port, SCSCR);
		if (s->chan_tx)
			new = scr | SCSCR_TDRQE;
		else
			new = scr & ~SCSCR_TDRQE;
		if (new != scr)
			serial_port_out(port, SCSCR, new);
	}

	/* cookie_tx in dma_submit_error() state means no transfer is
	 * in flight yet; schedule the worker to set one up. */
	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
	    dma_submit_error(s->cookie_tx)) {
		s->cookie_tx = 0;
		schedule_work(&s->work_tx);
	}
#endif

	if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
		ctrl = serial_port_in(port, SCSCR);
		serial_port_out(port, SCSCR, ctrl | SCSCR_TIE);
	}
}
600
601static void sci_stop_tx(struct uart_port *port)
602{
603	unsigned short ctrl;
604
605	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
606	ctrl = serial_port_in(port, SCSCR);
607
608	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
609		ctrl &= ~SCSCR_TDRQE;
610
611	ctrl &= ~SCSCR_TIE;
612
613	serial_port_out(port, SCSCR, ctrl);
614
615#ifdef CONFIG_SERIAL_SH_SCI_DMA
616	if (to_sci_port(port)->chan_tx &&
617	    !dma_submit_error(to_sci_port(port)->cookie_tx)) {
618		dmaengine_terminate_async(to_sci_port(port)->chan_tx);
619		to_sci_port(port)->cookie_tx = -EINVAL;
620	}
621#endif
622}
623
624static void sci_start_rx(struct uart_port *port)
625{
626	unsigned short ctrl;
627
628	ctrl = serial_port_in(port, SCSCR) | port_rx_irq_mask(port);
629
630	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
631		ctrl &= ~SCSCR_RDRQE;
632
633	serial_port_out(port, SCSCR, ctrl);
634}
635
636static void sci_stop_rx(struct uart_port *port)
637{
638	unsigned short ctrl;
639
640	ctrl = serial_port_in(port, SCSCR);
641
642	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
643		ctrl &= ~SCSCR_RDRQE;
644
645	ctrl &= ~port_rx_irq_mask(port);
646
647	serial_port_out(port, SCSCR, ctrl);
648}
649
/*
 * Acknowledge status bits in SCxSR.  @mask is the value to write; the
 * write semantics differ per variant, so the three branches below are
 * not interchangeable.
 */
static void sci_clear_SCxSR(struct uart_port *port, unsigned int mask)
{
	if (port->type == PORT_SCI) {
		/* Just store the mask */
		serial_port_out(port, SCxSR, mask);
	} else if (to_sci_port(port)->params->overrun_mask == SCIFA_ORER) {
		/* SCIFA/SCIFB and SCIF on SH7705/SH7720/SH7721 */
		/* Only clear the status bits we want to clear */
		serial_port_out(port, SCxSR,
				serial_port_in(port, SCxSR) & mask);
	} else {
		/* Store the mask, clear parity/framing errors */
		serial_port_out(port, SCxSR, mask & ~(SCIF_FERC | SCIF_PERC));
	}
}
665
666#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
667    defined(CONFIG_SERIAL_SH_SCI_EARLYCON)
668
#ifdef CONFIG_CONSOLE_POLL
/*
 * kgdb/kdb polled read: clear any pending error status first, then
 * return the next received character, or NO_POLL_CHAR when no data is
 * waiting.  Must not sleep.
 */
static int sci_poll_get_char(struct uart_port *port)
{
	unsigned short status;
	int c;

	do {
		status = serial_port_in(port, SCxSR);
		if (status & SCxSR_ERRORS(port)) {
			sci_clear_SCxSR(port, SCxSR_ERROR_CLEAR(port));
			continue;
		}
		break;
	} while (1);

	if (!(status & SCxSR_RDxF(port)))
		return NO_POLL_CHAR;

	c = serial_port_in(port, SCxRDR);

	/* Dummy read of SCxSR before clearing RDxF (required ack sequence) */
	serial_port_in(port, SCxSR);
	sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));

	return c;
}
#endif
696
697static void sci_poll_put_char(struct uart_port *port, unsigned char c)
698{
699	unsigned short status;
700
701	do {
702		status = serial_port_in(port, SCxSR);
703	} while (!(status & SCxSR_TDxE(port)));
704
705	serial_port_out(port, SCxTDR, c);
706	sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
707}
708#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE ||
709	  CONFIG_SERIAL_SH_SCI_EARLYCON */
710
/*
 * Configure the port's pin functions: RXD/TXD plus, when RTS/CTS is
 * wired up, the RTS#/CTS# pins.  SCIFA/SCIFB use the SCPCR/SCPDR
 * port-control registers; variants with an SCSPTR register drive
 * RTS#/CTS#/SCK through it.  A platform-supplied init_pins hook
 * overrides all of this.
 */
static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	struct sci_port *s = to_sci_port(port);

	/*
	 * Use port-specific handler if provided.
	 */
	if (s->cfg->ops && s->cfg->ops->init_pins) {
		s->cfg->ops->init_pins(port, cflag);
		return;
	}

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 data = serial_port_in(port, SCPDR);
		u16 ctrl = serial_port_in(port, SCPCR);

		/* Enable RXD and TXD pin functions */
		ctrl &= ~(SCPCR_RXDC | SCPCR_TXDC);
		if (to_sci_port(port)->has_rtscts) {
			/* RTS# is output, active low, unless autorts */
			if (!(port->mctrl & TIOCM_RTS)) {
				/* GPIO output mode, driven high (inactive) */
				ctrl |= SCPCR_RTSC;
				data |= SCPDR_RTSD;
			} else if (!s->autorts) {
				/* GPIO output mode, driven low (active) */
				ctrl |= SCPCR_RTSC;
				data &= ~SCPDR_RTSD;
			} else {
				/* Enable RTS# pin function */
				ctrl &= ~SCPCR_RTSC;
			}
			/* Enable CTS# pin function */
			ctrl &= ~SCPCR_CTSC;
		}
		serial_port_out(port, SCPDR, data);
		serial_port_out(port, SCPCR, ctrl);
	} else if (sci_getreg(port, SCSPTR)->size) {
		u16 status = serial_port_in(port, SCSPTR);

		/* RTS# is always output; and active low, unless autorts */
		status |= SCSPTR_RTSIO;
		if (!(port->mctrl & TIOCM_RTS))
			status |= SCSPTR_RTSDT;
		else if (!s->autorts)
			status &= ~SCSPTR_RTSDT;
		/* CTS# and SCK are inputs */
		status &= ~(SCSPTR_CTSIO | SCSPTR_SCKIO);
		serial_port_out(port, SCSPTR, status);
	}
}
760
/*
 * Number of characters currently held in the TX FIFO.  Prefers the
 * dedicated SCTFDR count register, then the high byte of SCFDR;
 * FIFO-less SCI falls back to 0/1 derived from the TDRE flag.
 */
static int sci_txfill(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	/* Mask one bit wider than fifosize so a "FIFO full" count fits */
	unsigned int fifo_mask = (s->params->fifosize << 1) - 1;
	const struct plat_sci_reg *reg;

	reg = sci_getreg(port, SCTFDR);
	if (reg->size)
		return serial_port_in(port, SCTFDR) & fifo_mask;

	reg = sci_getreg(port, SCFDR);
	if (reg->size)
		return serial_port_in(port, SCFDR) >> 8;

	return !(serial_port_in(port, SCxSR) & SCI_TDRE);
}
777
778static int sci_txroom(struct uart_port *port)
779{
780	return port->fifosize - sci_txfill(port);
781}
782
/*
 * Number of characters waiting in the RX FIFO.  Prefers the dedicated
 * SCRFDR count register, then the low half of SCFDR; FIFO-less SCI
 * falls back to 0/1 derived from the RDxF flag.
 */
static int sci_rxfill(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	/* Mask one bit wider than fifosize so a "FIFO full" count fits */
	unsigned int fifo_mask = (s->params->fifosize << 1) - 1;
	const struct plat_sci_reg *reg;

	reg = sci_getreg(port, SCRFDR);
	if (reg->size)
		return serial_port_in(port, SCRFDR) & fifo_mask;

	reg = sci_getreg(port, SCFDR);
	if (reg->size)
		return serial_port_in(port, SCFDR) & fifo_mask;

	return (serial_port_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
}
799
800/* ********************************************************************** *
801 *                   the interrupt related routines                       *
802 * ********************************************************************** */
803
/*
 * PIO transmit path (TXI interrupt handler body): drain the xmit
 * circular buffer (and any pending x_char) into the TX FIFO, then
 * acknowledge TDxE, wake writers when the buffer drops below
 * WAKEUP_CHARS, and stop transmission when it empties.
 */
static void sci_transmit_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int stopped = uart_tx_stopped(port);
	unsigned short status;
	unsigned short ctrl;
	int count;

	status = serial_port_in(port, SCxSR);
	if (!(status & SCxSR_TDxE(port))) {
		/* FIFO not empty yet: keep TIE only while data remains */
		ctrl = serial_port_in(port, SCSCR);
		if (uart_circ_empty(xmit))
			ctrl &= ~SCSCR_TIE;
		else
			ctrl |= SCSCR_TIE;
		serial_port_out(port, SCSCR, ctrl);
		return;
	}

	count = sci_txroom(port);

	do {
		unsigned char c;

		/* x_char (flow-control char) takes priority over the buffer */
		if (port->x_char) {
			c = port->x_char;
			port->x_char = 0;
		} else if (!uart_circ_empty(xmit) && !stopped) {
			c = xmit->buf[xmit->tail];
			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		} else {
			break;
		}

		serial_port_out(port, SCxTDR, c);

		port->icount.tx++;
	} while (--count > 0);

	sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	if (uart_circ_empty(xmit))
		sci_stop_tx(port);

}
851
/* On SH3, SCIF may read end-of-break as a space->mark char */
/*
 * STEPFN(c): true iff (c | (c - 1)) has all bits set, i.e. (c) is zero
 * or the negative of a power of two.  Evaluated in unsigned arithmetic
 * so c == INT_MIN no longer hits signed-overflow undefined behaviour
 * in the "c - 1" step; the truth value is unchanged for every other
 * input (unsigned wrap preserves the bit patterns).
 */
#define STEPFN(c)  ({unsigned int __c = (c); ((((__c) - 1u) | (__c)) == ~0u); })
854
/*
 * PIO receive path (RXI interrupt handler body): move everything in
 * the RX FIFO into the tty flip buffer, tagging each character with a
 * frame/parity flag as needed, then acknowledge RDxF.  The SCxSR /
 * SCxRDR read ordering below is variant-specific — do not reorder.
 */
static void sci_receive_chars(struct uart_port *port)
{
	struct tty_port *tport = &port->state->port;
	int i, count, copied = 0;
	unsigned short status;
	unsigned char flag;

	status = serial_port_in(port, SCxSR);
	if (!(status & SCxSR_RDxF(port)))
		return;

	while (1) {
		/* Don't copy more bytes than there is room for in the buffer */
		count = tty_buffer_request_room(tport, sci_rxfill(port));

		/* If for any reason we can't copy more data, we're done! */
		if (count == 0)
			break;

		if (port->type == PORT_SCI) {
			/* FIFO-less SCI: single character per pass */
			char c = serial_port_in(port, SCxRDR);
			if (uart_handle_sysrq_char(port, c))
				count = 0;
			else
				tty_insert_flip_char(tport, c, TTY_NORMAL);
		} else {
			for (i = 0; i < count; i++) {
				char c;

				if (port->type == PORT_SCIF ||
				    port->type == PORT_HSCIF) {
					/* SCIF/HSCIF: status before data */
					status = serial_port_in(port, SCxSR);
					c = serial_port_in(port, SCxRDR);
				} else {
					/* SCIFA/SCIFB: data before status */
					c = serial_port_in(port, SCxRDR);
					status = serial_port_in(port, SCxSR);
				}
				if (uart_handle_sysrq_char(port, c)) {
					/* consumed by sysrq; don't count it */
					count--; i--;
					continue;
				}

				/* Store data and status */
				if (status & SCxSR_FER(port)) {
					flag = TTY_FRAME;
					port->icount.frame++;
					dev_notice(port->dev, "frame error\n");
				} else if (status & SCxSR_PER(port)) {
					flag = TTY_PARITY;
					port->icount.parity++;
					dev_notice(port->dev, "parity error\n");
				} else
					flag = TTY_NORMAL;

				tty_insert_flip_char(tport, c, flag);
			}
		}

		serial_port_in(port, SCxSR); /* dummy read */
		sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));

		copied += count;
		port->icount.rx += count;
	}

	if (copied) {
		/* Tell the rest of the system the news. New characters! */
		tty_flip_buffer_push(tport);
	} else {
		/* TTY buffers full; read from RX reg to prevent lockup */
		serial_port_in(port, SCxRDR);
		serial_port_in(port, SCxSR); /* dummy read */
		sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
	}
}
930
/*
 * Handle RX error conditions reported in SCxSR (overrun, framing,
 * parity): bump the relevant icount statistics and push one marker
 * character per error into the tty layer.
 *
 * Returns the number of characters inserted into the flip buffer.
 */
static int sci_handle_errors(struct uart_port *port)
{
	int copied = 0;
	unsigned short status = serial_port_in(port, SCxSR);
	struct tty_port *tport = &port->state->port;
	struct sci_port *s = to_sci_port(port);

	/* Handle overruns */
	if (status & s->params->overrun_mask) {
		port->icount.overrun++;

		/* overrun error */
		if (tty_insert_flip_char(tport, 0, TTY_OVERRUN))
			copied++;

		dev_notice(port->dev, "overrun error\n");
	}

	if (status & SCxSR_FER(port)) {
		/* frame error */
		port->icount.frame++;

		if (tty_insert_flip_char(tport, 0, TTY_FRAME))
			copied++;

		dev_notice(port->dev, "frame error\n");
	}

	if (status & SCxSR_PER(port)) {
		/* parity error */
		port->icount.parity++;

		if (tty_insert_flip_char(tport, 0, TTY_PARITY))
			copied++;

		dev_notice(port->dev, "parity error\n");
	}

	if (copied)
		tty_flip_buffer_push(tport);

	return copied;
}
974
/*
 * Check and clear the variant-specific FIFO overrun flag (its register
 * and bit come from sci_port_params).  Ports whose register map lacks
 * the overrun register are a no-op.
 *
 * Returns 1 if an overrun was detected and reported, 0 otherwise.
 */
static int sci_handle_fifo_overrun(struct uart_port *port)
{
	struct tty_port *tport = &port->state->port;
	struct sci_port *s = to_sci_port(port);
	const struct plat_sci_reg *reg;
	int copied = 0;
	u16 status;

	reg = sci_getreg(port, s->params->overrun_reg);
	if (!reg->size)
		return 0;

	status = serial_port_in(port, s->params->overrun_reg);
	if (status & s->params->overrun_mask) {
		/* Write the flag back cleared to acknowledge it */
		status &= ~s->params->overrun_mask;
		serial_port_out(port, s->params->overrun_reg, status);

		port->icount.overrun++;

		tty_insert_flip_char(tport, 0, TTY_OVERRUN);
		tty_flip_buffer_push(tport);

		dev_dbg(port->dev, "overrun error\n");
		copied++;
	}

	return copied;
}
1003
/*
 * Handle a break condition: defer to uart_handle_break() first (it may
 * swallow the break, e.g. for sysrq), otherwise report TTY_BREAK to the
 * tty layer.  Also piggybacks a FIFO-overrun check.
 *
 * Returns the number of characters pushed to the tty layer.
 */
static int sci_handle_breaks(struct uart_port *port)
{
	int copied = 0;
	unsigned short status = serial_port_in(port, SCxSR);
	struct tty_port *tport = &port->state->port;

	if (uart_handle_break(port))
		return 0;

	if (status & SCxSR_BRK(port)) {
		port->icount.brk++;

		/* Notify of BREAK */
		if (tty_insert_flip_char(tport, 0, TTY_BREAK))
			copied++;

		dev_dbg(port->dev, "BREAK detected\n");
	}

	if (copied)
		tty_flip_buffer_push(tport);

	copied += sci_handle_fifo_overrun(port);

	return copied;
}
1030
/*
 * Program the RX FIFO trigger level.  HSCIF accepts any level via
 * HSRTRGR; SCIF and SCIFA/SCIFB only support a few discrete levels
 * encoded in the SCFCR RTRG bits, so the request is rounded down to
 * the nearest supported value.
 *
 * Returns the trigger level actually programmed.
 */
static int scif_set_rtrg(struct uart_port *port, int rx_trig)
{
	unsigned int bits;

	/* Clamp the request to [1, fifosize - 1] */
	if (rx_trig >= port->fifosize)
		rx_trig = port->fifosize - 1;
	if (rx_trig < 1)
		rx_trig = 1;

	/* HSCIF can be set to an arbitrary level. */
	if (sci_getreg(port, HSRTRGR)->size) {
		serial_port_out(port, HSRTRGR, rx_trig);
		return rx_trig;
	}

	switch (port->type) {
	case PORT_SCIF:
		if (rx_trig < 4) {
			bits = 0;
			rx_trig = 1;
		} else if (rx_trig < 8) {
			bits = SCFCR_RTRG0;
			rx_trig = 4;
		} else if (rx_trig < 14) {
			bits = SCFCR_RTRG1;
			rx_trig = 8;
		} else {
			bits = SCFCR_RTRG0 | SCFCR_RTRG1;
			rx_trig = 14;
		}
		break;
	case PORT_SCIFA:
	case PORT_SCIFB:
		if (rx_trig < 16) {
			bits = 0;
			rx_trig = 1;
		} else if (rx_trig < 32) {
			bits = SCFCR_RTRG0;
			rx_trig = 16;
		} else if (rx_trig < 48) {
			bits = SCFCR_RTRG1;
			rx_trig = 32;
		} else {
			bits = SCFCR_RTRG0 | SCFCR_RTRG1;
			rx_trig = 48;
		}
		break;
	default:
		WARN(1, "unknown FIFO configuration");
		return 1;
	}

	/* Read-modify-write only the RTRG field of SCFCR */
	serial_port_out(port, SCFCR,
		(serial_port_in(port, SCFCR) &
		~(SCFCR_RTRG1 | SCFCR_RTRG0)) | bits);

	return rx_trig;
}
1089
1090static int scif_rtrg_enabled(struct uart_port *port)
1091{
1092	if (sci_getreg(port, HSRTRGR)->size)
1093		return serial_port_in(port, HSRTRGR) != 0;
1094	else
1095		return (serial_port_in(port, SCFCR) &
1096			(SCFCR_RTRG0 | SCFCR_RTRG1)) != 0;
1097}
1098
/*
 * RX FIFO timeout expired with data below the trigger level: drop the
 * trigger to 1 so the pending characters generate an interrupt.
 */
static void rx_fifo_timer_fn(struct timer_list *t)
{
	struct sci_port *s = from_timer(s, t, rx_fifo_timer);
	struct uart_port *port = &s->port;

	dev_dbg(port->dev, "Rx timed out\n");
	scif_set_rtrg(port, 1);
}
1107
1108static ssize_t rx_fifo_trigger_show(struct device *dev,
1109				    struct device_attribute *attr, char *buf)
1110{
1111	struct uart_port *port = dev_get_drvdata(dev);
1112	struct sci_port *sci = to_sci_port(port);
1113
1114	return sprintf(buf, "%d\n", sci->rx_trigger);
1115}
1116
/*
 * sysfs write: request a new RX FIFO trigger level.  The level the
 * hardware actually supports (after quantization) is recorded in
 * rx_trigger.  On SCIFA/SCIFB the live trigger is immediately forced
 * back to 1 — NOTE(review): presumably deferred until the next
 * start-of-reception elsewhere in the driver; confirm against the
 * startup path.
 */
static ssize_t rx_fifo_trigger_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct sci_port *sci = to_sci_port(port);
	int ret;
	long r;

	ret = kstrtol(buf, 0, &r);
	if (ret)
		return ret;

	sci->rx_trigger = scif_set_rtrg(port, r);
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
		scif_set_rtrg(port, 1);

	return count;
}
1136
1137static DEVICE_ATTR_RW(rx_fifo_trigger);
1138
/*
 * sysfs read: report the RX FIFO timeout.  HSCIF exposes the TOT
 * field of hscif_tot (unshifted); other variants report
 * rx_fifo_timeout directly.
 */
static ssize_t rx_fifo_timeout_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct sci_port *sci = to_sci_port(port);
	int v;

	if (port->type == PORT_HSCIF)
		v = sci->hscif_tot >> HSSCR_TOT_SHIFT;
	else
		v = sci->rx_fifo_timeout;

	return sprintf(buf, "%d\n", v);
}
1154
/*
 * sysfs write: set the RX FIFO timeout.  For HSCIF only values 0..3
 * are valid (stored pre-shifted into hscif_tot).  For other variants
 * the value is kept in rx_fifo_timeout, the trigger is reset to 1, and
 * a non-zero timeout (re)initializes the rx_fifo_timer.
 */
static ssize_t rx_fifo_timeout_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t count)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct sci_port *sci = to_sci_port(port);
	int ret;
	long r;

	ret = kstrtol(buf, 0, &r);
	if (ret)
		return ret;

	if (port->type == PORT_HSCIF) {
		if (r < 0 || r > 3)
			return -EINVAL;
		sci->hscif_tot = r << HSSCR_TOT_SHIFT;
	} else {
		sci->rx_fifo_timeout = r;
		scif_set_rtrg(port, 1);
		if (r > 0)
			timer_setup(&sci->rx_fifo_timer, rx_fifo_timer_fn, 0);
	}

	return count;
}
1182
1183static DEVICE_ATTR_RW(rx_fifo_timeout);
1184
1185
1186#ifdef CONFIG_SERIAL_SH_SCI_DMA
/*
 * DMA engine callback for a completed Tx transfer.
 *
 * Consumes the transmitted bytes from the circular xmit buffer, wakes up
 * writers once enough room is free, and either schedules the next DMA
 * submission or marks Tx DMA idle (cookie_tx = -EINVAL).
 */
static void sci_dma_tx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long flags;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	spin_lock_irqsave(&port->lock, flags);

	/* Advance past the chunk the last descriptor transmitted. */
	xmit->tail += s->tx_dma_len;
	xmit->tail &= UART_XMIT_SIZE - 1;

	port->icount.tx += s->tx_dma_len;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (!uart_circ_empty(xmit)) {
		/* More pending data: queue the next submission. */
		s->cookie_tx = 0;
		schedule_work(&s->work_tx);
	} else {
		/* Buffer drained: mark Tx DMA idle. */
		s->cookie_tx = -EINVAL;
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
			u16 ctrl = serial_port_in(port, SCSCR);
			serial_port_out(port, SCSCR, ctrl & ~SCSCR_TIE);
		}
	}

	spin_unlock_irqrestore(&port->lock, flags);
}
1219
1220/* Locking: called with port lock held */
1221static int sci_dma_rx_push(struct sci_port *s, void *buf, size_t count)
1222{
1223	struct uart_port *port = &s->port;
1224	struct tty_port *tport = &port->state->port;
1225	int copied;
1226
1227	copied = tty_insert_flip_string(tport, buf, count);
1228	if (copied < count)
1229		port->icount.buf_overrun++;
1230
1231	port->icount.rx += copied;
1232
1233	return copied;
1234}
1235
1236static int sci_dma_rx_find_active(struct sci_port *s)
1237{
1238	unsigned int i;
1239
1240	for (i = 0; i < ARRAY_SIZE(s->cookie_rx); i++)
1241		if (s->active_rx == s->cookie_rx[i])
1242			return i;
1243
1244	return -1;
1245}
1246
1247static void sci_dma_rx_chan_invalidate(struct sci_port *s)
1248{
1249	unsigned int i;
1250
1251	s->chan_rx = NULL;
1252	for (i = 0; i < ARRAY_SIZE(s->cookie_rx); i++)
1253		s->cookie_rx[i] = -EINVAL;
1254	s->active_rx = 0;
1255}
1256
/*
 * Tear down the Rx DMA channel: invalidate it, stop any in-flight
 * transfer, free the bounce buffers, and release the channel.
 */
static void sci_dma_rx_release(struct sci_port *s)
{
	struct dma_chan *chan = s->chan_rx_saved;

	s->chan_rx_saved = NULL;
	sci_dma_rx_chan_invalidate(s);
	dmaengine_terminate_sync(chan);
	/* Both halves were allocated as a single coherent region; free once. */
	dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0],
			  sg_dma_address(&s->sg_rx[0]));
	dma_release_channel(chan);
}
1268
1269static void start_hrtimer_us(struct hrtimer *hrt, unsigned long usec)
1270{
1271	long sec = usec / 1000000;
1272	long nsec = (usec % 1000000) * 1000;
1273	ktime_t t = ktime_set(sec, nsec);
1274
1275	hrtimer_start(hrt, t, HRTIMER_MODE_REL);
1276}
1277
/*
 * Switch Rx event delivery from the DMA controller back to CPU interrupts,
 * undoing what sci_rx_interrupt() set up when it handed Rx over to DMA.
 */
static void sci_dma_rx_reenable_irq(struct sci_port *s)
{
	struct uart_port *port = &s->port;
	u16 scr;

	/* Direct new serial port interrupts back to CPU */
	scr = serial_port_in(port, SCSCR);
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/* Undo the RDRQE routing and the disable_irq_nosync(). */
		scr &= ~SCSCR_RDRQE;
		enable_irq(s->irqs[SCIx_RXI_IRQ]);
	}
	serial_port_out(port, SCSCR, scr | SCSCR_RIE);
}
1291
/*
 * DMA engine callback for a filled Rx bounce buffer.
 *
 * Pushes the completed buffer to the tty layer, immediately resubmits the
 * same buffer so double-buffering continues, and flips active_rx to the
 * other cookie.  On any submission failure, Rx falls back to PIO.
 */
static void sci_dma_rx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct dma_chan *chan = s->chan_rx;
	struct uart_port *port = &s->port;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	int active, count = 0;

	dev_dbg(port->dev, "%s(%d) active cookie %d\n", __func__, port->line,
		s->active_rx);

	spin_lock_irqsave(&port->lock, flags);

	/* Hand the completed buffer (if it is still ours) to the tty layer. */
	active = sci_dma_rx_find_active(s);
	if (active >= 0)
		count = sci_dma_rx_push(s, s->rx_buf[active], s->buf_len_rx);

	/* Re-arm the "partial buffer" timeout for the next transfer. */
	start_hrtimer_us(&s->rx_timer, s->rx_timeout);

	if (count)
		tty_flip_buffer_push(&port->state->port);

	/* Resubmit the just-drained buffer to keep double-buffering going. */
	desc = dmaengine_prep_slave_sg(s->chan_rx, &s->sg_rx[active], 1,
				       DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto fail;

	desc->callback = sci_dma_rx_complete;
	desc->callback_param = s;
	s->cookie_rx[active] = dmaengine_submit(desc);
	if (dma_submit_error(s->cookie_rx[active]))
		goto fail;

	/* The other buffer is the one the hardware fills next. */
	s->active_rx = s->cookie_rx[!active];

	dma_async_issue_pending(chan);

	spin_unlock_irqrestore(&port->lock, flags);
	dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
		__func__, s->cookie_rx[active], active, s->active_rx);
	return;

fail:
	/* Drop the lock for the warning, then re-take it for the teardown. */
	spin_unlock_irqrestore(&port->lock, flags);
	dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
	/* Switch to PIO */
	spin_lock_irqsave(&port->lock, flags);
	dmaengine_terminate_async(chan);
	sci_dma_rx_chan_invalidate(s);
	sci_dma_rx_reenable_irq(s);
	spin_unlock_irqrestore(&port->lock, flags);
}
1346
/*
 * Tear down the Tx DMA channel: flush the submission work, stop any
 * in-flight transfer, unmap the xmit buffer, and release the channel.
 */
static void sci_dma_tx_release(struct sci_port *s)
{
	struct dma_chan *chan = s->chan_tx_saved;

	/* Make sure sci_dma_tx_work_fn() is not (and will not be) running. */
	cancel_work_sync(&s->work_tx);
	s->chan_tx_saved = s->chan_tx = NULL;
	s->cookie_tx = -EINVAL;
	dmaengine_terminate_sync(chan);
	dma_unmap_single(chan->device->dev, s->tx_dma_addr, UART_XMIT_SIZE,
			 DMA_TO_DEVICE);
	dma_release_channel(chan);
}
1359
/*
 * Submit both Rx bounce-buffer descriptors and start the transfer.
 *
 * @port_lock_held: true when the caller already holds port->lock, so the
 *                  failure path must not take it again.
 *
 * Returns 0 on success or -EAGAIN after falling back to PIO.
 */
static int sci_dma_rx_submit(struct sci_port *s, bool port_lock_held)
{
	struct dma_chan *chan = s->chan_rx;
	struct uart_port *port = &s->port;
	unsigned long flags;
	int i;

	for (i = 0; i < 2; i++) {
		struct scatterlist *sg = &s->sg_rx[i];
		struct dma_async_tx_descriptor *desc;

		desc = dmaengine_prep_slave_sg(chan,
			sg, 1, DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc)
			goto fail;

		desc->callback = sci_dma_rx_complete;
		desc->callback_param = s;
		s->cookie_rx[i] = dmaengine_submit(desc);
		if (dma_submit_error(s->cookie_rx[i]))
			goto fail;

	}

	/* Buffer 0 is filled first. */
	s->active_rx = s->cookie_rx[0];

	dma_async_issue_pending(chan);
	return 0;

fail:
	/* Switch to PIO */
	if (!port_lock_held)
		spin_lock_irqsave(&port->lock, flags);
	/* Only terminate if the first descriptor was actually submitted. */
	if (i)
		dmaengine_terminate_async(chan);
	sci_dma_rx_chan_invalidate(s);
	sci_start_rx(port);
	if (!port_lock_held)
		spin_unlock_irqrestore(&port->lock, flags);
	return -EAGAIN;
}
1402
/*
 * Workqueue function that prepares and submits the next Tx DMA chunk from
 * the circular xmit buffer.  On any DMA failure, Tx falls back to PIO.
 */
static void sci_dma_tx_work_fn(struct work_struct *work)
{
	struct sci_port *s = container_of(work, struct sci_port, work_tx);
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan = s->chan_tx;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long flags;
	dma_addr_t buf;
	int head, tail;

	/*
	 * DMA is idle now.
	 * Port xmit buffer is already mapped, and it is one page... Just adjust
	 * offsets and lengths. Since it is a circular buffer, we have to
	 * transmit till the end, and then the rest. Take the port lock to get a
	 * consistent xmit buffer state.
	 */
	spin_lock_irq(&port->lock);
	head = xmit->head;
	tail = xmit->tail;
	buf = s->tx_dma_addr + (tail & (UART_XMIT_SIZE - 1));
	/* Transfer only up to the wrap point; the remainder goes next time. */
	s->tx_dma_len = min_t(unsigned int,
		CIRC_CNT(head, tail, UART_XMIT_SIZE),
		CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE));
	if (!s->tx_dma_len) {
		/* Transmit buffer has been flushed */
		spin_unlock_irq(&port->lock);
		return;
	}

	desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		spin_unlock_irq(&port->lock);
		dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
		goto switch_to_pio;
	}

	/* The buffer is streaming-mapped; sync the chunk before the device reads it. */
	dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
				   DMA_TO_DEVICE);

	desc->callback = sci_dma_tx_complete;
	desc->callback_param = s;
	s->cookie_tx = dmaengine_submit(desc);
	if (dma_submit_error(s->cookie_tx)) {
		spin_unlock_irq(&port->lock);
		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
		goto switch_to_pio;
	}

	spin_unlock_irq(&port->lock);
	dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
		__func__, xmit->buf, tail, head, s->cookie_tx);

	dma_async_issue_pending(chan);
	return;

switch_to_pio:
	spin_lock_irqsave(&port->lock, flags);
	s->chan_tx = NULL;
	sci_start_tx(port);
	spin_unlock_irqrestore(&port->lock, flags);
	return;
}
1469
/*
 * Rx DMA timeout: the current bounce buffer did not fill within
 * s->rx_timeout.  Stop the transfer, push whatever was received so far to
 * the tty layer, and restart DMA (SCIFA/SCIFB) or hand Rx back to the CPU.
 */
static enum hrtimer_restart sci_dma_rx_timer_fn(struct hrtimer *t)
{
	struct sci_port *s = container_of(t, struct sci_port, rx_timer);
	struct dma_chan *chan = s->chan_rx;
	struct uart_port *port = &s->port;
	struct dma_tx_state state;
	enum dma_status status;
	unsigned long flags;
	unsigned int read;
	int active, count;

	dev_dbg(port->dev, "DMA Rx timed out\n");

	spin_lock_irqsave(&port->lock, flags);

	active = sci_dma_rx_find_active(s);
	if (active < 0) {
		spin_unlock_irqrestore(&port->lock, flags);
		return HRTIMER_NORESTART;
	}

	status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
	if (status == DMA_COMPLETE) {
		spin_unlock_irqrestore(&port->lock, flags);
		dev_dbg(port->dev, "Cookie %d #%d has already completed\n",
			s->active_rx, active);

		/* Let packet complete handler take care of the packet */
		return HRTIMER_NORESTART;
	}

	dmaengine_pause(chan);

	/*
	 * sometimes DMA transfer doesn't stop even if it is stopped and
	 * data keeps on coming until transaction is complete so check
	 * for DMA_COMPLETE again
	 * Let packet complete handler take care of the packet
	 */
	status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
	if (status == DMA_COMPLETE) {
		spin_unlock_irqrestore(&port->lock, flags);
		dev_dbg(port->dev, "Transaction complete after DMA engine was stopped");
		return HRTIMER_NORESTART;
	}

	/* Handle incomplete DMA receive */
	dmaengine_terminate_async(s->chan_rx);
	/* Bytes received so far = buffer size minus what is still pending. */
	read = sg_dma_len(&s->sg_rx[active]) - state.residue;

	if (read) {
		count = sci_dma_rx_push(s, s->rx_buf[active], read);
		if (count)
			tty_flip_buffer_push(&port->state->port);
	}

	/* SCIFA/SCIFB restart DMA right away; other types revert to IRQ Rx. */
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
		sci_dma_rx_submit(s, true);

	sci_dma_rx_reenable_irq(s);

	spin_unlock_irqrestore(&port->lock, flags);

	return HRTIMER_NORESTART;
}
1535
1536static struct dma_chan *sci_request_dma_chan(struct uart_port *port,
1537					     enum dma_transfer_direction dir)
1538{
1539	struct dma_chan *chan;
1540	struct dma_slave_config cfg;
1541	int ret;
1542
1543	chan = dma_request_slave_channel(port->dev,
1544					 dir == DMA_MEM_TO_DEV ? "tx" : "rx");
1545	if (!chan) {
1546		dev_dbg(port->dev, "dma_request_slave_channel failed\n");
1547		return NULL;
1548	}
1549
1550	memset(&cfg, 0, sizeof(cfg));
1551	cfg.direction = dir;
1552	if (dir == DMA_MEM_TO_DEV) {
1553		cfg.dst_addr = port->mapbase +
1554			(sci_getreg(port, SCxTDR)->offset << port->regshift);
1555		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1556	} else {
1557		cfg.src_addr = port->mapbase +
1558			(sci_getreg(port, SCxRDR)->offset << port->regshift);
1559		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1560	}
1561
1562	ret = dmaengine_slave_config(chan, &cfg);
1563	if (ret) {
1564		dev_warn(port->dev, "dmaengine_slave_config failed %d\n", ret);
1565		dma_release_channel(chan);
1566		return NULL;
1567	}
1568
1569	return chan;
1570}
1571
/*
 * Try to set up Tx and Rx DMA for the port.  Every failure here is
 * non-fatal: the port simply keeps using PIO for the affected direction.
 */
static void sci_request_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	struct dma_chan *chan;

	dev_dbg(port->dev, "%s: port %d\n", __func__, port->line);

	/*
	 * DMA on console may interfere with Kernel log messages which use
	 * plain putchar(). So, simply don't use it with a console.
	 */
	if (uart_console(port))
		return;

	if (!port->dev->of_node)
		return;

	s->cookie_tx = -EINVAL;

	/*
	 * Don't request a dma channel if no channel was specified
	 * in the device tree.
	 */
	if (!of_find_property(port->dev->of_node, "dmas", NULL))
		return;

	chan = sci_request_dma_chan(port, DMA_MEM_TO_DEV);
	dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
	if (chan) {
		/* UART circular tx buffer is an aligned page. */
		s->tx_dma_addr = dma_map_single(chan->device->dev,
						port->state->xmit.buf,
						UART_XMIT_SIZE,
						DMA_TO_DEVICE);
		if (dma_mapping_error(chan->device->dev, s->tx_dma_addr)) {
			dev_warn(port->dev, "Failed mapping Tx DMA descriptor\n");
			dma_release_channel(chan);
		} else {
			dev_dbg(port->dev, "%s: mapped %lu@%p to %pad\n",
				__func__, UART_XMIT_SIZE,
				port->state->xmit.buf, &s->tx_dma_addr);

			INIT_WORK(&s->work_tx, sci_dma_tx_work_fn);
			s->chan_tx_saved = s->chan_tx = chan;
		}
	}

	chan = sci_request_dma_chan(port, DMA_DEV_TO_MEM);
	dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
	if (chan) {
		unsigned int i;
		dma_addr_t dma;
		void *buf;

		/*
		 * One coherent allocation carved into two bounce buffers for
		 * double-buffered Rx; each is at least 32 bytes.
		 */
		s->buf_len_rx = 2 * max_t(size_t, 16, port->fifosize);
		buf = dma_alloc_coherent(chan->device->dev, s->buf_len_rx * 2,
					 &dma, GFP_KERNEL);
		if (!buf) {
			dev_warn(port->dev,
				 "Failed to allocate Rx dma buffer, using PIO\n");
			dma_release_channel(chan);
			return;
		}

		for (i = 0; i < 2; i++) {
			struct scatterlist *sg = &s->sg_rx[i];

			sg_init_table(sg, 1);
			s->rx_buf[i] = buf;
			sg_dma_address(sg) = dma;
			sg_dma_len(sg) = s->buf_len_rx;

			buf += s->buf_len_rx;
			dma += s->buf_len_rx;
		}

		/* Timer that flushes a partially-filled Rx buffer to the tty. */
		hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		s->rx_timer.function = sci_dma_rx_timer_fn;

		s->chan_rx_saved = s->chan_rx = chan;

		/* SCIFA/SCIFB start Rx DMA immediately; others start on IRQ. */
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
			sci_dma_rx_submit(s, false);
	}
}
1657
1658static void sci_free_dma(struct uart_port *port)
1659{
1660	struct sci_port *s = to_sci_port(port);
1661
1662	if (s->chan_tx_saved)
1663		sci_dma_tx_release(s);
1664	if (s->chan_rx_saved)
1665		sci_dma_rx_release(s);
1666}
1667
/*
 * uart_ops.flush_buffer hook: the core has just emptied the xmit circular
 * buffer, so discard the in-flight DMA state that referenced it.
 */
static void sci_flush_buffer(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	/*
	 * In uart_flush_buffer(), the xmit circular buffer has just been
	 * cleared, so we have to reset tx_dma_len accordingly, and stop any
	 * pending transfers
	 */
	s->tx_dma_len = 0;
	if (s->chan_tx) {
		dmaengine_terminate_async(s->chan_tx);
		s->cookie_tx = -EINVAL;
	}
}
1683#else /* !CONFIG_SERIAL_SH_SCI_DMA */
/* No-op stubs so callers need no #ifdefs when DMA support is compiled out. */
static inline void sci_request_dma(struct uart_port *port)
{
}

static inline void sci_free_dma(struct uart_port *port)
{
}
1691
1692#define sci_flush_buffer	NULL
1693#endif /* !CONFIG_SERIAL_SH_SCI_DMA */
1694
/*
 * Rx interrupt handler.  When an Rx DMA channel is available, hand the
 * transfer over to DMA and mask further CPU Rx interrupts; otherwise
 * receive in PIO mode, managing the FIFO trigger/timeout as configured.
 */
static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	if (s->chan_rx) {
		u16 scr = serial_port_in(port, SCSCR);
		u16 ssr = serial_port_in(port, SCxSR);

		/* Disable future Rx interrupts */
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
			/* Route Rx requests to the DMA controller instead. */
			disable_irq_nosync(irq);
			scr |= SCSCR_RDRQE;
		} else {
			/* DMA must be (re)submitted here; fall back on failure. */
			if (sci_dma_rx_submit(s, false) < 0)
				goto handle_pio;

			scr &= ~SCSCR_RIE;
		}
		serial_port_out(port, SCSCR, scr);
		/* Clear current interrupt */
		serial_port_out(port, SCxSR,
				ssr & ~(SCIF_DR | SCxSR_RDxF(port)));
		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u us\n",
			jiffies, s->rx_timeout);
		start_hrtimer_us(&s->rx_timer, s->rx_timeout);

		return IRQ_HANDLED;
	}

handle_pio:
#endif

	/* Restore the full trigger level and arm a software timeout so data
	 * below the trigger does not sit in the FIFO indefinitely.
	 */
	if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0) {
		if (!scif_rtrg_enabled(port))
			scif_set_rtrg(port, s->rx_trigger);

		mod_timer(&s->rx_fifo_timer, jiffies + DIV_ROUND_UP(
			  s->rx_frame * HZ * s->rx_fifo_timeout, 1000000));
	}

	/* I think sci_receive_chars has to be called irrespective
	 * of whether the I_IXOFF is set, otherwise, how is the interrupt
	 * to be disabled?
	 */
	sci_receive_chars(port);

	return IRQ_HANDLED;
}
1745
/* Tx interrupt handler: refill the transmit FIFO under the port lock. */
static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	sci_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);

	return IRQ_HANDLED;
}
1757
/* Break interrupt handler. */
static irqreturn_t sci_br_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;

	/* Handle BREAKs */
	sci_handle_breaks(port);

	/* drop invalid character received before break was detected */
	serial_port_in(port, SCxRDR);

	/* Acknowledge the break condition in the status register. */
	sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port));

	return IRQ_HANDLED;
}
1772
/*
 * Error interrupt handler.  On parts where break and error share one IRQ,
 * dispatch the break portion first; then deal with Rx errors/overruns and
 * finally kick Tx in case an error stalled transmission (PIO mode only).
 */
static irqreturn_t sci_er_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);

	if (s->irqs[SCIx_ERI_IRQ] == s->irqs[SCIx_BRI_IRQ]) {
		/* Break and Error interrupts are muxed */
		unsigned short ssr_status = serial_port_in(port, SCxSR);

		/* Break Interrupt */
		if (ssr_status & SCxSR_BRK(port))
			sci_br_interrupt(irq, ptr);

		/* Break only? */
		if (!(ssr_status & SCxSR_ERRORS(port)))
			return IRQ_HANDLED;
	}

	/* Handle errors */
	if (port->type == PORT_SCI) {
		if (sci_handle_errors(port)) {
			/* discard character in rx buffer */
			serial_port_in(port, SCxSR);
			sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
		}
	} else {
		sci_handle_fifo_overrun(port);
		/* With Rx DMA active the DMA path drains the FIFO instead. */
		if (!s->chan_rx)
			sci_receive_chars(port);
	}

	sci_clear_SCxSR(port, SCxSR_ERROR_CLEAR(port));

	/* Kick the transmission */
	if (!s->chan_tx)
		sci_tx_interrupt(irq, ptr);

	return IRQ_HANDLED;
}
1812
/*
 * Handler for ports whose event sources share a single (muxed) IRQ line:
 * read the status registers once and dispatch to the per-event handlers.
 */
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
{
	unsigned short ssr_status, scr_status, err_enabled, orer_status = 0;
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);
	irqreturn_t ret = IRQ_NONE;

	ssr_status = serial_port_in(port, SCxSR);
	scr_status = serial_port_in(port, SCSCR);
	/* The overrun flag lives in SCxSR or a dedicated register per type. */
	if (s->params->overrun_reg == SCxSR)
		orer_status = ssr_status;
	else if (sci_getreg(port, s->params->overrun_reg)->size)
		orer_status = serial_port_in(port, s->params->overrun_reg);

	err_enabled = scr_status & port_rx_irq_mask(port);

	/* Tx Interrupt */
	if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
	    !s->chan_tx)
		ret = sci_tx_interrupt(irq, ptr);

	/*
	 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
	 * DR flags
	 */
	if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
	    (scr_status & SCSCR_RIE))
		ret = sci_rx_interrupt(irq, ptr);

	/* Error Interrupt */
	if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
		ret = sci_er_interrupt(irq, ptr);

	/* Break Interrupt (only when not already muxed into the error IRQ) */
	if (s->irqs[SCIx_ERI_IRQ] != s->irqs[SCIx_BRI_IRQ] &&
	    (ssr_status & SCxSR_BRK(port)) && err_enabled)
		ret = sci_br_interrupt(irq, ptr);

	/* Overrun Interrupt */
	if (orer_status & s->params->overrun_mask) {
		sci_handle_fifo_overrun(port);
		ret = IRQ_HANDLED;
	}

	return ret;
}
1859
/*
 * Table mapping each IRQ slot (see the SCIx_*_IRQ enum) to its request
 * name and handler.  Indexed by the same offsets as sci_port->irqs[].
 */
static const struct sci_irq_desc {
	const char	*desc;
	irq_handler_t	handler;
} sci_irq_desc[] = {
	/*
	 * Split out handlers, the default case.
	 */
	[SCIx_ERI_IRQ] = {
		.desc = "rx err",
		.handler = sci_er_interrupt,
	},

	[SCIx_RXI_IRQ] = {
		.desc = "rx full",
		.handler = sci_rx_interrupt,
	},

	[SCIx_TXI_IRQ] = {
		.desc = "tx empty",
		.handler = sci_tx_interrupt,
	},

	[SCIx_BRI_IRQ] = {
		.desc = "break",
		.handler = sci_br_interrupt,
	},

	[SCIx_DRI_IRQ] = {
		.desc = "rx ready",
		.handler = sci_rx_interrupt,
	},

	[SCIx_TEI_IRQ] = {
		.desc = "tx end",
		.handler = sci_tx_interrupt,
	},

	/*
	 * Special muxed handler.
	 */
	[SCIx_MUX_IRQ] = {
		.desc = "mux",
		.handler = sci_mpxed_interrupt,
	},
};
1905
/*
 * Request all IRQs the port needs.  Handles two special cases: fully muxed
 * ports (a single shared line served by sci_mpxed_interrupt), and partially
 * shared lines (the same IRQ number appearing in several slots, which must
 * only be requested once).
 *
 * Returns 0 on success or a negative errno, freeing anything already
 * requested on failure.
 */
static int sci_request_irq(struct sci_port *port)
{
	struct uart_port *up = &port->port;
	int i, j, w, ret = 0;

	for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
		const struct sci_irq_desc *desc;
		int irq;

		/* Check if already registered (muxed) */
		for (w = 0; w < i; w++)
			if (port->irqs[w] == port->irqs[i])
				w = i + 1;
		/* w > i signals a duplicate was found above; skip this slot. */
		if (w > i)
			continue;

		if (SCIx_IRQ_IS_MUXED(port)) {
			/* One shared line: use the mux handler and stop after it. */
			i = SCIx_MUX_IRQ;
			irq = up->irq;
		} else {
			irq = port->irqs[i];

			/*
			 * Certain port types won't support all of the
			 * available interrupt sources.
			 */
			if (unlikely(irq < 0))
				continue;
		}

		desc = sci_irq_desc + i;
		port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s",
					    dev_name(up->dev), desc->desc);
		if (!port->irqstr[j]) {
			ret = -ENOMEM;
			goto out_nomem;
		}

		ret = request_irq(irq, desc->handler, up->irqflags,
				  port->irqstr[j], port);
		if (unlikely(ret)) {
			dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc);
			goto out_noirq;
		}
	}

	return 0;

out_noirq:
	/* Release every IRQ requested before the failing one. */
	while (--i >= 0)
		free_irq(port->irqs[i], port);

out_nomem:
	/* Free all name strings allocated so far. */
	while (--j >= 0)
		kfree(port->irqstr[j]);

	return ret;
}
1964
/*
 * Free every IRQ requested by sci_request_irq(), taking care not to free
 * a shared line more than once.
 */
static void sci_free_irq(struct sci_port *port)
{
	int i, j;

	/*
	 * Iterate from slot 0 upward; for a fully muxed port all slots hold
	 * the same IRQ, so the shared line is freed on the first pass and
	 * we bail out immediately afterwards.
	 */
	for (i = 0; i < SCIx_NR_IRQS; i++) {
		int irq = port->irqs[i];

		/*
		 * Certain port types won't support all of the available
		 * interrupt sources.
		 */
		if (unlikely(irq < 0))
			continue;

		/* Check if already freed (irq was muxed) */
		for (j = 0; j < i; j++)
			if (port->irqs[j] == irq)
				j = i + 1;
		/* j > i signals the IRQ was seen (and freed) earlier. */
		if (j > i)
			continue;

		free_irq(port->irqs[i], port);
		kfree(port->irqstr[i]);

		if (SCIx_IRQ_IS_MUXED(port)) {
			/* If there's only one IRQ, we're done. */
			return;
		}
	}
}
1999
2000static unsigned int sci_tx_empty(struct uart_port *port)
2001{
2002	unsigned short status = serial_port_in(port, SCxSR);
2003	unsigned short in_tx_fifo = sci_txfill(port);
2004
2005	return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
2006}
2007
/*
 * Drive the RTS# output by hand.  SCIFA/SCIFB use the port data/control
 * registers (SCPDR/SCPCR); other types use SCSPTR when present.  The pin
 * is active low, so @state == true clears the data bit.
 */
static void sci_set_rts(struct uart_port *port, bool state)
{
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 data = serial_port_in(port, SCPDR);

		/* Active low */
		if (state)
			data &= ~SCPDR_RTSD;
		else
			data |= SCPDR_RTSD;
		serial_port_out(port, SCPDR, data);

		/* RTS# is output */
		serial_port_out(port, SCPCR,
				serial_port_in(port, SCPCR) | SCPCR_RTSC);
	} else if (sci_getreg(port, SCSPTR)->size) {
		u16 ctrl = serial_port_in(port, SCSPTR);

		/* Active low */
		if (state)
			ctrl &= ~SCSPTR_RTSDT;
		else
			ctrl |= SCSPTR_RTSDT;
		serial_port_out(port, SCSPTR, ctrl);
	}
}
2034
/*
 * Sample the CTS# input.  Returns true ("clear to send") when the
 * active-low pin is low, or unconditionally true if the port has no
 * readable CTS pin.
 */
static bool sci_get_cts(struct uart_port *port)
{
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/* Active low */
		return !(serial_port_in(port, SCPDR) & SCPDR_CTSD);
	} else if (sci_getreg(port, SCSPTR)->size) {
		/* Active low */
		return !(serial_port_in(port, SCSPTR) & SCSPTR_CTSDT);
	}

	return true;
}
2047
2048/*
2049 * Modem control is a bit of a mixed bag for SCI(F) ports. Generally
2050 * CTS/RTS is supported in hardware by at least one port and controlled
2051 * via SCSPTR (SCxPCR for SCIFA/B parts), or external pins (presently
2052 * handled via the ->init_pins() op, which is a bit of a one-way street,
2053 * lacking any ability to defer pin control -- this will later be
2054 * converted over to the GPIO framework).
2055 *
2056 * Other modes (such as loopback) are supported generically on certain
2057 * port types, but not others. For these it's sufficient to test for the
2058 * existence of the support register and simply ignore the port type.
2059 */
/*
 * uart_ops.set_mctrl hook: apply loopback, forward the full state to the
 * mctrl GPIOs, and -- on ports with hardware RTS/CTS -- manage the auto-RTS
 * mode and the manual RTS# level.
 */
static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct sci_port *s = to_sci_port(port);

	if (mctrl & TIOCM_LOOP) {
		const struct plat_sci_reg *reg;

		/*
		 * Standard loopback mode for SCFCR ports.
		 */
		reg = sci_getreg(port, SCFCR);
		if (reg->size)
			serial_port_out(port, SCFCR,
					serial_port_in(port, SCFCR) |
					SCFCR_LOOP);
	}

	mctrl_gpio_set(s->gpios, mctrl);

	if (!s->has_rtscts)
		return;

	if (!(mctrl & TIOCM_RTS)) {
		/* Disable Auto RTS */
		serial_port_out(port, SCFCR,
				serial_port_in(port, SCFCR) & ~SCFCR_MCE);

		/* Clear RTS */
		sci_set_rts(port, 0);
	} else if (s->autorts) {
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
			/* Enable RTS# pin function */
			serial_port_out(port, SCPCR,
				serial_port_in(port, SCPCR) & ~SCPCR_RTSC);
		}

		/* Enable Auto RTS */
		serial_port_out(port, SCFCR,
				serial_port_in(port, SCFCR) | SCFCR_MCE);
	} else {
		/* Set RTS */
		sci_set_rts(port, 1);
	}
}
2104
/*
 * uart_ops.get_mctrl hook: combine the mctrl GPIO states with the
 * hardware CTS reading.  Signals with neither a GPIO nor hardware support
 * are reported as asserted so the core does not block on them.
 */
static unsigned int sci_get_mctrl(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	struct mctrl_gpios *gpios = s->gpios;
	unsigned int mctrl = 0;

	mctrl_gpio_get(gpios, &mctrl);

	/*
	 * CTS/RTS is handled in hardware when supported, while nothing
	 * else is wired up.
	 */
	if (s->autorts) {
		if (sci_get_cts(port))
			mctrl |= TIOCM_CTS;
	} else if (!mctrl_gpio_to_gpiod(gpios, UART_GPIO_CTS)) {
		mctrl |= TIOCM_CTS;
	}
	if (!mctrl_gpio_to_gpiod(gpios, UART_GPIO_DSR))
		mctrl |= TIOCM_DSR;
	if (!mctrl_gpio_to_gpiod(gpios, UART_GPIO_DCD))
		mctrl |= TIOCM_CAR;

	return mctrl;
}
2130
2131static void sci_enable_ms(struct uart_port *port)
2132{
2133	mctrl_gpio_enable_ms(to_sci_port(port)->gpios);
2134}
2135
/*
 * uart_ops.break_ctl hook: start (@break_state == -1) or stop a break
 * condition by driving the Tx line through SCSPTR with the transmitter
 * disabled.
 */
static void sci_break_ctl(struct uart_port *port, int break_state)
{
	unsigned short scscr, scsptr;
	unsigned long flags;

	/* check whether the port has SCSPTR */
	if (!sci_getreg(port, SCSPTR)->size) {
		/*
		 * Not supported by hardware. Most parts couple break and rx
		 * interrupts together, with break detection always enabled.
		 */
		return;
	}

	spin_lock_irqsave(&port->lock, flags);
	scsptr = serial_port_in(port, SCSPTR);
	scscr = serial_port_in(port, SCSCR);

	if (break_state == -1) {
		/* Drive the pin low manually and take the transmitter off. */
		scsptr = (scsptr | SCSPTR_SPB2IO) & ~SCSPTR_SPB2DT;
		scscr &= ~SCSCR_TE;
	} else {
		/* Release the line and re-enable the transmitter. */
		scsptr = (scsptr | SCSPTR_SPB2DT) & ~SCSPTR_SPB2IO;
		scscr |= SCSCR_TE;
	}

	serial_port_out(port, SCSPTR, scsptr);
	serial_port_out(port, SCSCR, scscr);
	spin_unlock_irqrestore(&port->lock, flags);
}
2166
/*
 * uart_ops.startup hook: set up DMA (best effort) and request the port's
 * IRQs.  DMA setup failure is not fatal -- the port runs in PIO mode --
 * but IRQ request failure is, and unwinds the DMA setup.
 */
static int sci_startup(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	int ret;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	sci_request_dma(port);

	ret = sci_request_irq(s);
	if (unlikely(ret < 0)) {
		sci_free_dma(port);
		return ret;
	}

	return 0;
}
2184
/*
 * uart_ops.shutdown hook: quiesce the hardware, cancel the Rx timers,
 * then release IRQs and DMA resources.
 */
static void sci_shutdown(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	unsigned long flags;
	u16 scr;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	s->autorts = false;
	mctrl_gpio_disable_ms(to_sci_port(port)->gpios);

	spin_lock_irqsave(&port->lock, flags);
	sci_stop_rx(port);
	sci_stop_tx(port);
	/*
	 * Stop RX and TX, disable related interrupts, keep clock source
	 * and HSCIF TOT bits
	 */
	scr = serial_port_in(port, SCSCR);
	serial_port_out(port, SCSCR, scr &
			(SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot));
	spin_unlock_irqrestore(&port->lock, flags);

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	if (s->chan_rx_saved) {
		dev_dbg(port->dev, "%s(%d) deleting rx_timer\n", __func__,
			port->line);
		hrtimer_cancel(&s->rx_timer);
	}
#endif

	/* The rx_fifo_timer is only armed when trigger+timeout are in use. */
	if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0)
		del_timer_sync(&s->rx_fifo_timer);
	sci_free_irq(s);
	sci_free_dma(port);
}
2221
/*
 * Find the sample-rate setting that best approximates @bps using the
 * external clock (SCK) as baud source.
 *
 * @srr: on return, the chosen sample rate minus one (register encoding).
 * Returns the signed bps deviation of the best match.
 */
static int sci_sck_calc(struct sci_port *s, unsigned int bps,
			unsigned int *srr)
{
	unsigned long freq = s->clk_rates[SCI_SCK];
	int err, min_err = INT_MAX;
	unsigned int sr;

	/* Non-HSCIF types effectively run the comparison at double rate. */
	if (s->port.type != PORT_HSCIF)
		freq *= 2;

	for_each_sr(sr, s) {
		err = DIV_ROUND_CLOSEST(freq, sr) - bps;
		if (abs(err) >= abs(min_err))
			continue;

		min_err = err;
		*srr = sr - 1;

		/* Exact match; no better candidate exists. */
		if (!err)
			break;
	}

	dev_dbg(s->port.dev, "SCK: %u%+d bps using SR %u\n", bps, min_err,
		*srr + 1);
	return min_err;
}
2248
/*
 * Find the sample rate and BRG dividend that best approximate @bps when
 * clocked from the Baud Rate Generator source running at @freq.
 *
 * @dlr: on return, the chosen dividend (1..65535).
 * @srr: on return, the chosen sample rate minus one (register encoding).
 * Returns the signed bps deviation of the best match.
 */
static int sci_brg_calc(struct sci_port *s, unsigned int bps,
			unsigned long freq, unsigned int *dlr,
			unsigned int *srr)
{
	int err, min_err = INT_MAX;
	unsigned int sr, dl;

	/* Non-HSCIF types effectively run the comparison at double rate. */
	if (s->port.type != PORT_HSCIF)
		freq *= 2;

	for_each_sr(sr, s) {
		dl = DIV_ROUND_CLOSEST(freq, sr * bps);
		dl = clamp(dl, 1U, 65535U);

		err = DIV_ROUND_CLOSEST(freq, sr * dl) - bps;
		if (abs(err) >= abs(min_err))
			continue;

		min_err = err;
		*dlr = dl;
		*srr = sr - 1;

		/* Exact match; stop searching. */
		if (!err)
			break;
	}

	dev_dbg(s->port.dev, "BRG: %u%+d bps using DL %u SR %u\n", bps,
		min_err, *dlr, *srr + 1);
	return min_err;
}
2279
2280/* calculate sample rate, BRR, and clock select */
/* calculate sample rate, BRR, and clock select */
static int sci_scbrr_calc(struct sci_port *s, unsigned int bps,
			  unsigned int *brr, unsigned int *srr,
			  unsigned int *cks)
{
	unsigned long freq = s->clk_rates[SCI_FCK];
	unsigned int sr, br, prediv, scrate, c;
	int err, min_err = INT_MAX;

	/* Non-HSCIF types effectively run the comparison at double rate. */
	if (s->port.type != PORT_HSCIF)
		freq *= 2;

	/*
	 * Find the combination of sample rate and clock select with the
	 * smallest deviation from the desired baud rate.
	 * Prefer high sample rates to maximise the receive margin.
	 *
	 * M: Receive margin (%)
	 * N: Ratio of bit rate to clock (N = sampling rate)
	 * D: Clock duty (D = 0 to 1.0)
	 * L: Frame length (L = 9 to 12)
	 * F: Absolute value of clock frequency deviation
	 *
	 *  M = |(0.5 - 1 / 2 * N) - ((L - 0.5) * F) -
	 *      (|D - 0.5| / N * (1 + F))|
	 *  NOTE: Usually, treat D for 0.5, F is 0 by this calculation.
	 */
	for_each_sr(sr, s) {
		/* c is the 2-bit clock-select (CKS) prescaler field. */
		for (c = 0; c <= 3; c++) {
			/* integerized formulas from HSCIF documentation */
			prediv = sr * (1 << (2 * c + 1));

			/*
			 * We need to calculate:
			 *
			 *     br = freq / (prediv * bps) clamped to [1..256]
			 *     err = freq / (br * prediv) - bps
			 *
			 * Watch out for overflow when calculating the desired
			 * sampling clock rate!
			 */
			if (bps > UINT_MAX / prediv)
				break;

			scrate = prediv * bps;
			br = DIV_ROUND_CLOSEST(freq, scrate);
			br = clamp(br, 1U, 256U);

			err = DIV_ROUND_CLOSEST(freq, br * prediv) - bps;
			if (abs(err) >= abs(min_err))
				continue;

			min_err = err;
			*brr = br - 1;
			*srr = sr - 1;
			*cks = c;

			/* Exact match; stop the whole search. */
			if (!err)
				goto found;
		}
	}

found:
	dev_dbg(s->port.dev, "BRR: %u%+d bps using N %u SR %u cks %u\n", bps,
		min_err, *brr, *srr + 1, *cks);
	return min_err;
}
2347
/*
 * sci_reset - bring the port into a known quiescent state
 *
 * Disables transmitter/receiver, resets the FIFOs (where a FIFO control
 * register exists), clears pending status/error flags, and re-arms the
 * RX FIFO trigger level (or the RX FIFO timeout timer when that mode is
 * in use).
 */
static void sci_reset(struct uart_port *port)
{
	const struct plat_sci_reg *reg;
	unsigned int status;
	struct sci_port *s = to_sci_port(port);

	serial_port_out(port, SCSCR, s->hscif_tot);	/* TE=0, RE=0, CKE1=0 */

	reg = sci_getreg(port, SCFCR);
	if (reg->size)
		serial_port_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);

	/* Clear RX-full, error and break status bits */
	sci_clear_SCxSR(port,
			SCxSR_RDxF_CLEAR(port) & SCxSR_ERROR_CLEAR(port) &
			SCxSR_BREAK_CLEAR(port));
	if (sci_getreg(port, SCLSR)->size) {
		status = serial_port_in(port, SCLSR);
		status &= ~(SCLSR_TO | SCLSR_ORER);
		serial_port_out(port, SCLSR, status);
	}

	if (s->rx_trigger > 1) {
		if (s->rx_fifo_timeout) {
			/* Timeout mode: trigger on first byte, timer flushes */
			scif_set_rtrg(port, 1);
			timer_setup(&s->rx_fifo_timer, rx_fifo_timer_fn, 0);
		} else {
			if (port->type == PORT_SCIFA ||
			    port->type == PORT_SCIFB)
				scif_set_rtrg(port, 1);
			else
				scif_set_rtrg(port, s->rx_trigger);
		}
	}
}
2382
/*
 * sci_set_termios - program line discipline settings into the hardware
 *
 * Picks the sampling-clock source with the smallest baud rate deviation
 * (external (H)SCK, BRG-divided external/internal clock, or the divided
 * functional clock), then programs frame format, baud rate registers,
 * flow control and finally re-enables RX/TX.
 */
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
			    struct ktermios *old)
{
	unsigned int baud, smr_val = SCSMR_ASYNC, scr_val = 0, i, bits;
	unsigned int brr = 255, cks = 0, srr = 15, dl = 0, sccks = 0;
	/* *1 variables hold candidate values from each clock-source probe */
	unsigned int brr1 = 255, cks1 = 0, srr1 = 15, dl1 = 0;
	struct sci_port *s = to_sci_port(port);
	const struct plat_sci_reg *reg;
	int min_err = INT_MAX, err;
	unsigned long max_freq = 0;
	int best_clk = -1;
	unsigned long flags;

	/* Only 7 and 8 bit characters are supported; coerce others to CS8 */
	if ((termios->c_cflag & CSIZE) == CS7) {
		smr_val |= SCSMR_CHR;
	} else {
		termios->c_cflag &= ~CSIZE;
		termios->c_cflag |= CS8;
	}
	if (termios->c_cflag & PARENB)
		smr_val |= SCSMR_PE;
	if (termios->c_cflag & PARODD)
		smr_val |= SCSMR_PE | SCSMR_ODD;
	if (termios->c_cflag & CSTOPB)
		smr_val |= SCSMR_STOP;

	/*
	 * earlyprintk comes here early on with port->uartclk set to zero.
	 * the clock framework is not up and running at this point so here
	 * we assume that 115200 is the maximum baud rate. please note that
	 * the baud rate is not programmed during earlyprintk - it is assumed
	 * that the previous boot loader has enabled required clocks and
	 * setup the baud rate generator hardware for us already.
	 */
	if (!port->uartclk) {
		baud = uart_get_baud_rate(port, termios, old, 0, 115200);
		goto done;
	}

	for (i = 0; i < SCI_NUM_CLKS; i++)
		max_freq = max(max_freq, s->clk_rates[i]);

	baud = uart_get_baud_rate(port, termios, old, 0, max_freq / min_sr(s));
	if (!baud)
		goto done;

	/*
	 * There can be multiple sources for the sampling clock.  Find the one
	 * that gives us the smallest deviation from the desired baud rate.
	 */

	/* Optional Undivided External Clock */
	if (s->clk_rates[SCI_SCK] && port->type != PORT_SCIFA &&
	    port->type != PORT_SCIFB) {
		err = sci_sck_calc(s, baud, &srr1);
		if (abs(err) < abs(min_err)) {
			best_clk = SCI_SCK;
			scr_val = SCSCR_CKE1;
			sccks = SCCKS_CKS;
			min_err = err;
			srr = srr1;
			/* Exact rate achieved - stop probing clock sources */
			if (!err)
				goto done;
		}
	}

	/* Optional BRG Frequency Divided External Clock */
	if (s->clk_rates[SCI_SCIF_CLK] && sci_getreg(port, SCDL)->size) {
		err = sci_brg_calc(s, baud, s->clk_rates[SCI_SCIF_CLK], &dl1,
				   &srr1);
		if (abs(err) < abs(min_err)) {
			best_clk = SCI_SCIF_CLK;
			scr_val = SCSCR_CKE1;
			sccks = 0;
			min_err = err;
			dl = dl1;
			srr = srr1;
			if (!err)
				goto done;
		}
	}

	/* Optional BRG Frequency Divided Internal Clock */
	if (s->clk_rates[SCI_BRG_INT] && sci_getreg(port, SCDL)->size) {
		err = sci_brg_calc(s, baud, s->clk_rates[SCI_BRG_INT], &dl1,
				   &srr1);
		if (abs(err) < abs(min_err)) {
			best_clk = SCI_BRG_INT;
			scr_val = SCSCR_CKE1;
			sccks = SCCKS_XIN;
			min_err = err;
			dl = dl1;
			srr = srr1;
			if (!min_err)
				goto done;
		}
	}

	/* Divided Functional Clock using standard Bit Rate Register */
	err = sci_scbrr_calc(s, baud, &brr1, &srr1, &cks1);
	if (abs(err) < abs(min_err)) {
		best_clk = SCI_FCK;
		scr_val = 0;
		min_err = err;
		brr = brr1;
		srr = srr1;
		cks = cks1;
	}

done:
	if (best_clk >= 0)
		dev_dbg(port->dev, "Using clk %pC for %u%+d bps\n",
			s->clks[best_clk], baud, min_err);

	sci_port_enable(s);

	/*
	 * Program the optional External Baud Rate Generator (BRG) first.
	 * It controls the mux to select (H)SCK or frequency divided clock.
	 */
	if (best_clk >= 0 && sci_getreg(port, SCCKS)->size) {
		serial_port_out(port, SCDL, dl);
		serial_port_out(port, SCCKS, sccks);
	}

	spin_lock_irqsave(&port->lock, flags);

	sci_reset(port);

	uart_update_timeout(port, termios->c_cflag, baud);

	/*
	 * byte size and parity: "bits" counts the frame length in bit times
	 * (start bit + data bits + one stop bit), with extra stop/parity
	 * bits added below.
	 */
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		bits = 7;
		break;
	case CS6:
		bits = 8;
		break;
	case CS7:
		bits = 9;
		break;
	default:
		bits = 10;
		break;
	}

	if (termios->c_cflag & CSTOPB)
		bits++;
	if (termios->c_cflag & PARENB)
		bits++;

	if (best_clk >= 0) {
		/* SCIFA/SCIFB encode the sampling rate in SMR sample bits */
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
			switch (srr + 1) {
			case 5:  smr_val |= SCSMR_SRC_5;  break;
			case 7:  smr_val |= SCSMR_SRC_7;  break;
			case 11: smr_val |= SCSMR_SRC_11; break;
			case 13: smr_val |= SCSMR_SRC_13; break;
			case 16: smr_val |= SCSMR_SRC_16; break;
			case 17: smr_val |= SCSMR_SRC_17; break;
			case 19: smr_val |= SCSMR_SRC_19; break;
			case 27: smr_val |= SCSMR_SRC_27; break;
			}
		smr_val |= cks;
		serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
		serial_port_out(port, SCSMR, smr_val);
		serial_port_out(port, SCBRR, brr);
		if (sci_getreg(port, HSSRR)->size) {
			unsigned int hssrr = srr | HSCIF_SRE;
			/* Calculate deviation from intended rate at the
			 * center of the last stop bit in sampling clocks.
			 */
			int last_stop = bits * 2 - 1;
			int deviation = DIV_ROUND_CLOSEST(min_err * last_stop *
							  (int)(srr + 1),
							  2 * (int)baud);

			if (abs(deviation) >= 2) {
				/* At least two sampling clocks off at the
				 * last stop bit; we can increase the error
				 * margin by shifting the sampling point.
				 */
				int shift = clamp(deviation / 2, -8, 7);

				hssrr |= (shift << HSCIF_SRHP_SHIFT) &
					 HSCIF_SRHP_MASK;
				hssrr |= HSCIF_SRDE;
			}
			serial_port_out(port, HSSRR, hssrr);
		}

		/* Wait one bit interval */
		udelay((1000000 + (baud - 1)) / baud);
	} else {
		/* Don't touch the bit rate configuration */
		scr_val = s->cfg->scscr & (SCSCR_CKE1 | SCSCR_CKE0);
		smr_val |= serial_port_in(port, SCSMR) &
			   (SCSMR_CKEDG | SCSMR_SRC_MASK | SCSMR_CKS);
		serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
		serial_port_out(port, SCSMR, smr_val);
	}

	sci_init_pins(port, termios->c_cflag);

	port->status &= ~UPSTAT_AUTOCTS;
	s->autorts = false;
	reg = sci_getreg(port, SCFCR);
	if (reg->size) {
		unsigned short ctrl = serial_port_in(port, SCFCR);

		if ((port->flags & UPF_HARD_FLOW) &&
		    (termios->c_cflag & CRTSCTS)) {
			/* There is no CTS interrupt to restart the hardware */
			port->status |= UPSTAT_AUTOCTS;
			/* MCE is enabled when RTS is raised */
			s->autorts = true;
		}

		/*
		 * As we've done a sci_reset() above, ensure we don't
		 * interfere with the FIFOs while toggling MCE. As the
		 * reset values could still be set, simply mask them out.
		 */
		ctrl &= ~(SCFCR_RFRST | SCFCR_TFRST);

		serial_port_out(port, SCFCR, ctrl);
	}
	if (port->flags & UPF_HARD_FLOW) {
		/* Refresh (Auto) RTS */
		sci_set_mctrl(port, port->mctrl);
	}

	/* Re-enable receiver and transmitter, preserving clock selection */
	scr_val |= SCSCR_RE | SCSCR_TE |
		   (s->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0));
	serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
	if ((srr + 1 == 5) &&
	    (port->type == PORT_SCIFA || port->type == PORT_SCIFB)) {
		/*
		 * In asynchronous mode, when the sampling rate is 1/5, first
		 * received data may become invalid on some SCIFA and SCIFB.
		 * To avoid this problem wait more than 1 serial data time (1
		 * bit time x serial data number) after setting SCSCR.RE = 1.
		 */
		udelay(DIV_ROUND_UP(10 * 1000000, baud));
	}

	/*
	 * Calculate delay for 2 DMA buffers (4 FIFO).
	 * See serial_core.c::uart_update_timeout().
	 * With 10 bits (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above
	 * function calculates 1 jiffie for the data plus 5 jiffies for the
	 * "slop(e)." Then below we calculate 5 jiffies (20ms) for 2 DMA
	 * buffers (4 FIFO sizes), but when performing a faster transfer, the
	 * value obtained by this formula is too small. Therefore, if the value
	 * is smaller than 20ms, use 20ms as the timeout value for DMA.
	 */
	s->rx_frame = (10000 * bits) / (baud / 100);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
	s->rx_timeout = s->buf_len_rx * 2 * s->rx_frame;
	if (s->rx_timeout < 20)
		s->rx_timeout = 20;
#endif

	if ((termios->c_cflag & CREAD) != 0)
		sci_start_rx(port);

	spin_unlock_irqrestore(&port->lock, flags);

	sci_port_disable(s);

	if (UART_ENABLE_MS(port, termios->c_cflag))
		sci_enable_ms(port);
}
2657
2658static void sci_pm(struct uart_port *port, unsigned int state,
2659		   unsigned int oldstate)
2660{
2661	struct sci_port *sci_port = to_sci_port(port);
2662
2663	switch (state) {
2664	case UART_PM_STATE_OFF:
2665		sci_port_disable(sci_port);
2666		break;
2667	default:
2668		sci_port_enable(sci_port);
2669		break;
2670	}
2671}
2672
2673static const char *sci_type(struct uart_port *port)
2674{
2675	switch (port->type) {
2676	case PORT_IRDA:
2677		return "irda";
2678	case PORT_SCI:
2679		return "sci";
2680	case PORT_SCIF:
2681		return "scif";
2682	case PORT_SCIFA:
2683		return "scifa";
2684	case PORT_SCIFB:
2685		return "scifb";
2686	case PORT_HSCIF:
2687		return "hscif";
2688	}
2689
2690	return NULL;
2691}
2692
/*
 * sci_remap_port - establish the port's virtual register base
 *
 * ioremap()s the register window for DT-described ports (or when
 * UPF_IOREMAP is set); otherwise the physical cookie is used directly.
 * Returns 0 on success or -ENXIO if the remap fails.
 */
static int sci_remap_port(struct uart_port *port)
{
	struct sci_port *sport = to_sci_port(port);

	/*
	 * Nothing to do if there's already an established membase.
	 */
	if (port->membase)
		return 0;

	if (port->dev->of_node || (port->flags & UPF_IOREMAP)) {
		port->membase = ioremap(port->mapbase, sport->reg_size);
		if (unlikely(!port->membase)) {
			dev_err(port->dev, "can't remap port#%d\n", port->line);
			return -ENXIO;
		}
	} else {
		/*
		 * For the simple (and majority of) cases where we don't
		 * need to do any remapping, just cast the cookie
		 * directly.
		 */
		port->membase = (void __iomem *)(uintptr_t)port->mapbase;
	}

	return 0;
}
2720
2721static void sci_release_port(struct uart_port *port)
2722{
2723	struct sci_port *sport = to_sci_port(port);
2724
2725	if (port->dev->of_node || (port->flags & UPF_IOREMAP)) {
2726		iounmap(port->membase);
2727		port->membase = NULL;
2728	}
2729
2730	release_mem_region(port->mapbase, sport->reg_size);
2731}
2732
2733static int sci_request_port(struct uart_port *port)
2734{
2735	struct resource *res;
2736	struct sci_port *sport = to_sci_port(port);
2737	int ret;
2738
2739	res = request_mem_region(port->mapbase, sport->reg_size,
2740				 dev_name(port->dev));
2741	if (unlikely(res == NULL)) {
2742		dev_err(port->dev, "request_mem_region failed.");
2743		return -EBUSY;
2744	}
2745
2746	ret = sci_remap_port(port);
2747	if (unlikely(ret != 0)) {
2748		release_resource(res);
2749		return ret;
2750	}
2751
2752	return 0;
2753}
2754
2755static void sci_config_port(struct uart_port *port, int flags)
2756{
2757	if (flags & UART_CONFIG_TYPE) {
2758		struct sci_port *sport = to_sci_port(port);
2759
2760		port->type = sport->cfg->type;
2761		sci_request_port(port);
2762	}
2763}
2764
2765static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
2766{
2767	if (ser->baud_base < 2400)
2768		/* No paper tape reader for Mitch.. */
2769		return -EINVAL;
2770
2771	return 0;
2772}
2773
/* serial core operations shared by all SCI/SCIF/HSCIF port variants */
static const struct uart_ops sci_uart_ops = {
	.tx_empty	= sci_tx_empty,
	.set_mctrl	= sci_set_mctrl,
	.get_mctrl	= sci_get_mctrl,
	.start_tx	= sci_start_tx,
	.stop_tx	= sci_stop_tx,
	.stop_rx	= sci_stop_rx,
	.enable_ms	= sci_enable_ms,
	.break_ctl	= sci_break_ctl,
	.startup	= sci_startup,
	.shutdown	= sci_shutdown,
	.flush_buffer	= sci_flush_buffer,
	.set_termios	= sci_set_termios,
	.pm		= sci_pm,
	.type		= sci_type,
	.release_port	= sci_release_port,
	.request_port	= sci_request_port,
	.config_port	= sci_config_port,
	.verify_port	= sci_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= sci_poll_get_char,
	.poll_put_char	= sci_poll_put_char,
#endif
};
2798
/*
 * sci_init_clocks - look up the (optional) clocks used by this port
 *
 * Fills sci_port->clks[] for all SCI_NUM_CLKS clock inputs.  Only the
 * functional clock is effectively mandatory; for it, legacy fallback
 * names ("sci_ick", then "peripheral_clk") are tried before failing.
 * Missing optional clocks are recorded as NULL.  Returns 0 on success,
 * -EPROBE_DEFER if any lookup defers, or a lookup error for the
 * functional clock.
 */
static int sci_init_clocks(struct sci_port *sci_port, struct device *dev)
{
	const char *clk_names[] = {
		[SCI_FCK] = "fck",
		[SCI_SCK] = "sck",
		[SCI_BRG_INT] = "brg_int",
		[SCI_SCIF_CLK] = "scif_clk",
	};
	struct clk *clk;
	unsigned int i;

	/* HSCIF uses a differently named sampling clock input */
	if (sci_port->cfg->type == PORT_HSCIF)
		clk_names[SCI_SCK] = "hsck";

	for (i = 0; i < SCI_NUM_CLKS; i++) {
		clk = devm_clk_get(dev, clk_names[i]);
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		if (IS_ERR(clk) && i == SCI_FCK) {
			/*
			 * "fck" used to be called "sci_ick", and we need to
			 * maintain DT backward compatibility.
			 */
			clk = devm_clk_get(dev, "sci_ick");
			if (PTR_ERR(clk) == -EPROBE_DEFER)
				return -EPROBE_DEFER;

			if (!IS_ERR(clk))
				goto found;

			/*
			 * Not all SH platforms declare a clock lookup entry
			 * for SCI devices, in which case we need to get the
			 * global "peripheral_clk" clock.
			 */
			clk = devm_clk_get(dev, "peripheral_clk");
			if (!IS_ERR(clk))
				goto found;

			dev_err(dev, "failed to get %s (%ld)\n", clk_names[i],
				PTR_ERR(clk));
			return PTR_ERR(clk);
		}

found:
		if (IS_ERR(clk))
			dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i],
				PTR_ERR(clk));
		else
			dev_dbg(dev, "clk %s is %pC rate %lu\n", clk_names[i],
				clk, clk_get_rate(clk));
		/* Optional clocks that failed lookup are stored as NULL */
		sci_port->clks[i] = IS_ERR(clk) ? NULL : clk;
	}
	return 0;
}
2855
2856static const struct sci_port_params *
2857sci_probe_regmap(const struct plat_sci_port *cfg)
2858{
2859	unsigned int regtype;
2860
2861	if (cfg->regtype != SCIx_PROBE_REGTYPE)
2862		return &sci_port_params[cfg->regtype];
2863
2864	switch (cfg->type) {
2865	case PORT_SCI:
2866		regtype = SCIx_SCI_REGTYPE;
2867		break;
2868	case PORT_IRDA:
2869		regtype = SCIx_IRDA_REGTYPE;
2870		break;
2871	case PORT_SCIFA:
2872		regtype = SCIx_SCIFA_REGTYPE;
2873		break;
2874	case PORT_SCIFB:
2875		regtype = SCIx_SCIFB_REGTYPE;
2876		break;
2877	case PORT_SCIF:
2878		/*
2879		 * The SH-4 is a bit of a misnomer here, although that's
2880		 * where this particular port layout originated. This
2881		 * configuration (or some slight variation thereof)
2882		 * remains the dominant model for all SCIFs.
2883		 */
2884		regtype = SCIx_SH4_SCIF_REGTYPE;
2885		break;
2886	case PORT_HSCIF:
2887		regtype = SCIx_HSCIF_REGTYPE;
2888		break;
2889	default:
2890		pr_err("Can't probe register map for given port\n");
2891		return NULL;
2892	}
2893
2894	return &sci_port_params[regtype];
2895}
2896
/*
 * sci_init_single - initialize one sci_port from platform data
 * @dev:      backing platform device
 * @sci_port: port state to fill in
 * @index:    uart line number
 * @p:        platform configuration for this port
 * @early:    true for the earlyprintk path, where clocks and runtime PM
 *            are not yet available and are therefore skipped
 *
 * Resolves the register resource, interrupts, register map parameters,
 * RX trigger defaults and (unless @early) clocks and runtime PM.
 * Returns 0 on success or a negative errno.
 */
static int sci_init_single(struct platform_device *dev,
			   struct sci_port *sci_port, unsigned int index,
			   const struct plat_sci_port *p, bool early)
{
	struct uart_port *port = &sci_port->port;
	const struct resource *res;
	unsigned int i;
	int ret;

	sci_port->cfg	= p;

	port->ops	= &sci_uart_ops;
	port->iotype	= UPIO_MEM;
	port->line	= index;
	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_SH_SCI_CONSOLE);

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -ENOMEM;

	port->mapbase = res->start;
	sci_port->reg_size = resource_size(res);

	/* Only the first IRQ is mandatory; the rest may be absent */
	for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i) {
		if (i)
			sci_port->irqs[i] = platform_get_irq_optional(dev, i);
		else
			sci_port->irqs[i] = platform_get_irq(dev, i);
	}

	/*
	 * The fourth interrupt on SCI port is transmit end interrupt, so
	 * shuffle the interrupts.
	 */
	if (p->type == PORT_SCI)
		swap(sci_port->irqs[SCIx_BRI_IRQ], sci_port->irqs[SCIx_TEI_IRQ]);

	/* The SCI generates several interrupts. They can be muxed together or
	 * connected to different interrupt lines. In the muxed case only one
	 * interrupt resource is specified as there is only one interrupt ID.
	 * In the non-muxed case, up to 6 interrupt signals might be generated
	 * from the SCI, however those signals might have their own individual
	 * interrupt ID numbers, or muxed together with another interrupt.
	 */
	if (sci_port->irqs[0] < 0)
		return -ENXIO;

	/* Muxed case: replicate the single IRQ across all slots */
	if (sci_port->irqs[1] < 0)
		for (i = 1; i < ARRAY_SIZE(sci_port->irqs); i++)
			sci_port->irqs[i] = sci_port->irqs[0];

	sci_port->params = sci_probe_regmap(p);
	if (unlikely(sci_port->params == NULL))
		return -EINVAL;

	/* Default RX FIFO trigger level per port variant */
	switch (p->type) {
	case PORT_SCIFB:
		sci_port->rx_trigger = 48;
		break;
	case PORT_HSCIF:
		sci_port->rx_trigger = 64;
		break;
	case PORT_SCIFA:
		sci_port->rx_trigger = 32;
		break;
	case PORT_SCIF:
		if (p->regtype == SCIx_SH7705_SCIF_REGTYPE)
			/* RX triggering not implemented for this IP */
			sci_port->rx_trigger = 1;
		else
			sci_port->rx_trigger = 8;
		break;
	default:
		sci_port->rx_trigger = 1;
		break;
	}

	sci_port->rx_fifo_timeout = 0;
	sci_port->hscif_tot = 0;

	/* SCIFA on sh7723 and sh7724 need a custom sampling rate that doesn't
	 * match the SoC datasheet, this should be investigated. Let platform
	 * data override the sampling rate for now.
	 */
	sci_port->sampling_rate_mask = p->sampling_rate
				     ? SCI_SR(p->sampling_rate)
				     : sci_port->params->sampling_rate_mask;

	if (!early) {
		ret = sci_init_clocks(sci_port, &dev->dev);
		if (ret < 0)
			return ret;

		port->dev = &dev->dev;

		pm_runtime_enable(&dev->dev);
	}

	port->type		= p->type;
	port->flags		= UPF_FIXED_PORT | UPF_BOOT_AUTOCONF | p->flags;
	port->fifosize		= sci_port->params->fifosize;

	if (port->type == PORT_SCI && !dev->dev.of_node) {
		if (sci_port->reg_size >= 0x20)
			port->regshift = 2;
		else
			port->regshift = 1;
	}

	/*
	 * The UART port needs an IRQ value, so we peg this to the RX IRQ
	 * for the multi-IRQ ports, which is where we are primarily
	 * concerned with the shutdown path synchronization.
	 *
	 * For the muxed case there's nothing more to do.
	 */
	port->irq		= sci_port->irqs[SCIx_RXI_IRQ];
	port->irqflags		= 0;

	port->serial_in		= sci_serial_in;
	port->serial_out	= sci_serial_out;

	return 0;
}
3021
/* Undo the non-early parts of sci_init_single() (runtime PM enable) */
static void sci_cleanup_single(struct sci_port *port)
{
	pm_runtime_disable(port->port.dev);
}
3026
3027#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
3028    defined(CONFIG_SERIAL_SH_SCI_EARLYCON)
/* uart_console_write() callback: emit one character via polled TX */
static void serial_console_putchar(struct uart_port *port, int ch)
{
	sci_poll_put_char(port, ch);
}
3033
3034/*
3035 *	Print a string to the serial port trying not to disturb
3036 *	any possible real use of the port...
3037 */
3038static void serial_console_write(struct console *co, const char *s,
3039				 unsigned count)
3040{
3041	struct sci_port *sci_port = &sci_ports[co->index];
3042	struct uart_port *port = &sci_port->port;
3043	unsigned short bits, ctrl, ctrl_temp;
3044	unsigned long flags;
3045	int locked = 1;
3046
3047	if (port->sysrq)
3048		locked = 0;
3049	else if (oops_in_progress)
3050		locked = spin_trylock_irqsave(&port->lock, flags);
3051	else
3052		spin_lock_irqsave(&port->lock, flags);
3053
3054	/* first save SCSCR then disable interrupts, keep clock source */
3055	ctrl = serial_port_in(port, SCSCR);
3056	ctrl_temp = SCSCR_RE | SCSCR_TE |
3057		    (sci_port->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0)) |
3058		    (ctrl & (SCSCR_CKE1 | SCSCR_CKE0));
3059	serial_port_out(port, SCSCR, ctrl_temp | sci_port->hscif_tot);
3060
3061	uart_console_write(port, s, count, serial_console_putchar);
3062
3063	/* wait until fifo is empty and last bit has been transmitted */
3064	bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
3065	while ((serial_port_in(port, SCxSR) & bits) != bits)
3066		cpu_relax();
3067
3068	/* restore the SCSCR */
3069	serial_port_out(port, SCSCR, ctrl);
3070
3071	if (locked)
3072		spin_unlock_irqrestore(&port->lock, flags);
3073}
3074
/*
 * serial_console_setup - console core setup hook
 *
 * Validates the console index, maps the port registers, and applies
 * either the user-supplied console options or the 115200n8 defaults.
 * Returns 0 on success or a negative errno.
 */
static int serial_console_setup(struct console *co, char *options)
{
	struct sci_port *sci_port;
	struct uart_port *port;
	/* Defaults used when no console= options string was given */
	int baud = 115200;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Refuse to handle any bogus ports.
	 */
	if (co->index < 0 || co->index >= SCI_NPORTS)
		return -ENODEV;

	sci_port = &sci_ports[co->index];
	port = &sci_port->port;

	/*
	 * Refuse to handle uninitialized ports.
	 */
	if (!port->ops)
		return -ENODEV;

	ret = sci_remap_port(port);
	if (unlikely(ret != 0))
		return ret;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(port, co, baud, parity, bits, flow);
}
3109
/* Regular kernel console bound to the sci uart driver (ttySC) */
static struct console serial_console = {
	.name		= "ttySC",
	.device		= uart_console_device,
	.write		= serial_console_write,
	.setup		= serial_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &sci_uart_driver,
};
3119
3120#ifdef CONFIG_SUPERH
/* SuperH earlyprintk console; no .setup, configured via sci_probe_earlyprintk() */
static struct console early_serial_console = {
	.name           = "early_ttySC",
	.write          = serial_console_write,
	.flags          = CON_PRINTBUFFER,
	.index		= -1,
};

/* Option string captured by sh_early_platform_init_buffer() below */
static char early_serial_buf[32];
3129
/*
 * sci_probe_earlyprintk - register the earlyprintk console for @pdev
 *
 * Performs the minimal ("early") port initialization and registers
 * early_serial_console.  Only one earlyprintk console may exist; a
 * second call returns -EEXIST.  Unless "keep" appears in the option
 * string, the console is marked CON_BOOT and replaced later.
 */
static int sci_probe_earlyprintk(struct platform_device *pdev)
{
	const struct plat_sci_port *cfg = dev_get_platdata(&pdev->dev);

	if (early_serial_console.data)
		return -EEXIST;

	early_serial_console.index = pdev->id;

	sci_init_single(pdev, &sci_ports[pdev->id], pdev->id, cfg, true);

	serial_console_setup(&early_serial_console, early_serial_buf);

	if (!strstr(early_serial_buf, "keep"))
		early_serial_console.flags |= CON_BOOT;

	register_console(&early_serial_console);
	return 0;
}
3149#endif
3150
3151#define SCI_CONSOLE	(&serial_console)
3152
3153#else
/* No console support configured: earlyprintk probing always fails */
static inline int sci_probe_earlyprintk(struct platform_device *pdev)
{
	return -EINVAL;
}
3158
3159#define SCI_CONSOLE	NULL
3160
3161#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE || CONFIG_SERIAL_SH_SCI_EARLYCON */
3162
static const char banner[] __initconst = "SuperH (H)SCI(F) driver initialized";

/* Serializes lazy uart_register_driver() in sci_probe_single() */
static DEFINE_MUTEX(sci_uart_registration_lock);
static struct uart_driver sci_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "sci",
	.dev_name	= "ttySC",
	.major		= SCI_MAJOR,
	.minor		= SCI_MINOR_START,
	.nr		= SCI_NPORTS,
	.cons		= SCI_CONSOLE,
};
3175
/*
 * sci_remove - platform driver remove hook
 *
 * Unregisters the uart port, releases its line for reuse, disables
 * runtime PM, and removes the sysfs attributes created in sci_probe().
 */
static int sci_remove(struct platform_device *dev)
{
	struct sci_port *port = platform_get_drvdata(dev);
	unsigned int type = port->port.type;	/* uart_remove_... clears it */

	sci_ports_in_use &= ~BIT(port->port.line);
	uart_remove_one_port(&sci_uart_driver, &port->port);

	sci_cleanup_single(port);

	/* Mirror the conditional attribute creation done in sci_probe() */
	if (port->port.fifosize > 1)
		device_remove_file(&dev->dev, &dev_attr_rx_fifo_trigger);
	if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF)
		device_remove_file(&dev->dev, &dev_attr_rx_fifo_timeout);

	return 0;
}
3193
3194
/*
 * Pack/unpack the OF match data pointer: port type in the upper bits,
 * register layout (regtype) in the lower 16 bits.
 */
#define SCI_OF_DATA(type, regtype)	(void *)((type) << 16 | (regtype))
#define SCI_OF_TYPE(data)		((unsigned long)(data) >> 16)
#define SCI_OF_REGTYPE(data)		((unsigned long)(data) & 0xffff)

/* Most specific compatibles first; generic fallbacks last */
static const struct of_device_id of_sci_match[] = {
	/* SoC-specific types */
	{
		.compatible = "renesas,scif-r7s72100",
		.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE),
	},
	{
		.compatible = "renesas,scif-r7s9210",
		.data = SCI_OF_DATA(PORT_SCIF, SCIx_RZ_SCIFA_REGTYPE),
	},
	/* Family-specific types */
	{
		.compatible = "renesas,rcar-gen1-scif",
		.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
	}, {
		.compatible = "renesas,rcar-gen2-scif",
		.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
	}, {
		.compatible = "renesas,rcar-gen3-scif",
		.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
	},
	/* Generic types */
	{
		.compatible = "renesas,scif",
		.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_REGTYPE),
	}, {
		.compatible = "renesas,scifa",
		.data = SCI_OF_DATA(PORT_SCIFA, SCIx_SCIFA_REGTYPE),
	}, {
		.compatible = "renesas,scifb",
		.data = SCI_OF_DATA(PORT_SCIFB, SCIx_SCIFB_REGTYPE),
	}, {
		.compatible = "renesas,hscif",
		.data = SCI_OF_DATA(PORT_HSCIF, SCIx_HSCIF_REGTYPE),
	}, {
		.compatible = "renesas,sci",
		.data = SCI_OF_DATA(PORT_SCI, SCIx_SCI_REGTYPE),
	}, {
		/* Terminator */
	},
};
MODULE_DEVICE_TABLE(of, of_sci_match);
3241
/*
 * sci_parse_dt - build platform data from the device tree node
 * @pdev:   platform device being probed
 * @dev_id: updated with the resolved uart line number
 *
 * Allocates a plat_sci_port filled from the OF match data and the
 * "serial" alias (falling back to the first free line when no alias
 * exists).  Returns the platform data, or NULL on any failure or when
 * there is no DT node.
 */
static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
					  unsigned int *dev_id)
{
	struct device_node *np = pdev->dev.of_node;
	struct plat_sci_port *p;
	struct sci_port *sp;
	const void *data;
	int id;

	if (!IS_ENABLED(CONFIG_OF) || !np)
		return NULL;

	data = of_device_get_match_data(&pdev->dev);

	p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL);
	if (!p)
		return NULL;

	/* Get the line number from the aliases node. */
	id = of_alias_get_id(np, "serial");
	/* Without an alias, fall back to the lowest unused line number */
	if (id < 0 && ~sci_ports_in_use)
		id = ffz(sci_ports_in_use);
	if (id < 0) {
		dev_err(&pdev->dev, "failed to get alias id (%d)\n", id);
		return NULL;
	}
	if (id >= ARRAY_SIZE(sci_ports)) {
		dev_err(&pdev->dev, "serial%d out of range\n", id);
		return NULL;
	}

	sp = &sci_ports[id];
	*dev_id = id;

	p->type = SCI_OF_TYPE(data);
	p->regtype = SCI_OF_REGTYPE(data);

	sp->has_rtscts = of_property_read_bool(np, "uart-has-rtscts");

	return p;
}
3283
/*
 * sci_probe_single - set up and register one uart port
 * @dev:     backing platform device
 * @index:   uart line number
 * @p:       platform configuration
 * @sciport: port state to initialize
 *
 * Lazily registers the uart driver on first use, initializes the port,
 * wires up optional modem-control GPIOs, and adds the port to the
 * serial core.  Returns 0 on success or a negative errno.
 */
static int sci_probe_single(struct platform_device *dev,
				      unsigned int index,
				      struct plat_sci_port *p,
				      struct sci_port *sciport)
{
	int ret;

	/* Sanity check */
	if (unlikely(index >= SCI_NPORTS)) {
		dev_notice(&dev->dev, "Attempting to register port %d when only %d are available\n",
			   index+1, SCI_NPORTS);
		dev_notice(&dev->dev, "Consider bumping CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
		return -EINVAL;
	}
	BUILD_BUG_ON(SCI_NPORTS > sizeof(sci_ports_in_use) * 8);
	if (sci_ports_in_use & BIT(index))
		return -EBUSY;

	/* Register the uart driver on first port probe only */
	mutex_lock(&sci_uart_registration_lock);
	if (!sci_uart_driver.state) {
		ret = uart_register_driver(&sci_uart_driver);
		if (ret) {
			mutex_unlock(&sci_uart_registration_lock);
			return ret;
		}
	}
	mutex_unlock(&sci_uart_registration_lock);

	ret = sci_init_single(dev, sciport, index, p, false);
	if (ret)
		return ret;

	sciport->gpios = mctrl_gpio_init(&sciport->port, 0);
	if (IS_ERR(sciport->gpios))
		return PTR_ERR(sciport->gpios);

	if (sciport->has_rtscts) {
		/* Dedicated RTS/CTS pins and GPIO RTS/CTS are exclusive */
		if (mctrl_gpio_to_gpiod(sciport->gpios, UART_GPIO_CTS) ||
		    mctrl_gpio_to_gpiod(sciport->gpios, UART_GPIO_RTS)) {
			dev_err(&dev->dev, "Conflicting RTS/CTS config\n");
			return -EINVAL;
		}
		sciport->port.flags |= UPF_HARD_FLOW;
	}

	ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
	if (ret) {
		sci_cleanup_single(sciport);
		return ret;
	}

	return 0;
}
3337
/*
 * sci_probe - platform driver probe hook
 *
 * Resolves platform data (from DT or board files), registers the port,
 * creates the optional rx_fifo_trigger/rx_fifo_timeout sysfs attributes,
 * and marks the line as in use.  Returns 0 on success or a negative
 * errno.
 */
static int sci_probe(struct platform_device *dev)
{
	struct plat_sci_port *p;
	struct sci_port *sp;
	unsigned int dev_id;
	int ret;

	/*
	 * If we've come here via earlyprintk initialization, head off to
	 * the special early probe. We don't have sufficient device state
	 * to make it beyond this yet.
	 */
#ifdef CONFIG_SUPERH
	if (is_sh_early_platform_device(dev))
		return sci_probe_earlyprintk(dev);
#endif

	if (dev->dev.of_node) {
		p = sci_parse_dt(dev, &dev_id);
		if (p == NULL)
			return -EINVAL;
	} else {
		p = dev->dev.platform_data;
		if (p == NULL) {
			dev_err(&dev->dev, "no platform data supplied\n");
			return -EINVAL;
		}

		dev_id = dev->id;
	}

	sp = &sci_ports[dev_id];
	platform_set_drvdata(dev, sp);

	ret = sci_probe_single(dev, dev_id, p, sp);
	if (ret)
		return ret;

	if (sp->port.fifosize > 1) {
		ret = device_create_file(&dev->dev, &dev_attr_rx_fifo_trigger);
		if (ret)
			return ret;
	}
	if (sp->port.type == PORT_SCIFA || sp->port.type == PORT_SCIFB ||
	    sp->port.type == PORT_HSCIF) {
		ret = device_create_file(&dev->dev, &dev_attr_rx_fifo_timeout);
		if (ret) {
			/* Roll back the first attribute on failure */
			if (sp->port.fifosize > 1) {
				device_remove_file(&dev->dev,
						   &dev_attr_rx_fifo_trigger);
			}
			return ret;
		}
	}

#ifdef CONFIG_SH_STANDARD_BIOS
	sh_bios_gdb_detach();
#endif

	sci_ports_in_use |= BIT(dev_id);
	return 0;
}
3400
3401static __maybe_unused int sci_suspend(struct device *dev)
3402{
3403	struct sci_port *sport = dev_get_drvdata(dev);
3404
3405	if (sport)
3406		uart_suspend_port(&sci_uart_driver, &sport->port);
3407
3408	return 0;
3409}
3410
3411static __maybe_unused int sci_resume(struct device *dev)
3412{
3413	struct sci_port *sport = dev_get_drvdata(dev);
3414
3415	if (sport)
3416		uart_resume_port(&sci_uart_driver, &sport->port);
3417
3418	return 0;
3419}
3420
/* System sleep PM ops; sci_suspend/sci_resume are no-ops when !CONFIG_PM_SLEEP. */
static SIMPLE_DEV_PM_OPS(sci_dev_pm_ops, sci_suspend, sci_resume);

/* Platform driver glue; matched by name or, with OF, via of_sci_match. */
static struct platform_driver sci_driver = {
	.probe		= sci_probe,
	.remove		= sci_remove,
	.driver		= {
		.name	= "sh-sci",
		.pm	= &sci_dev_pm_ops,
		.of_match_table = of_match_ptr(of_sci_match),
	},
};
3432
3433static int __init sci_init(void)
3434{
3435	pr_info("%s\n", banner);
3436
3437	return platform_driver_register(&sci_driver);
3438}
3439
3440static void __exit sci_exit(void)
3441{
3442	platform_driver_unregister(&sci_driver);
3443
3444	if (sci_uart_driver.state)
3445		uart_unregister_driver(&sci_uart_driver);
3446}
3447
#if defined(CONFIG_SUPERH) && defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
/*
 * Hook this driver into the SuperH early platform machinery for
 * earlyprintk; any "earlyprintk=..." options are stashed in
 * early_serial_buf for the early probe path.
 */
sh_early_platform_init_buffer("earlyprintk", &sci_driver,
			   early_serial_buf, ARRAY_SIZE(early_serial_buf));
#endif
3452#ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
/* Configuration for the earlycon port; lives in initdata, used only at boot. */
static struct plat_sci_port port_cfg __initdata;

/*
 * Common earlycon initialization shared by all port-type variants below.
 *
 * Binds the statically allocated sci_ports[0] to the early console
 * device, resolves the register map for @type via sci_probe_regmap(),
 * and enables the receiver and transmitter on top of whatever the boot
 * loader left in SCSCR.
 *
 * Returns 0 on success, or -ENODEV if the early I/O memory has not
 * been mapped yet.
 *
 * NOTE(review): this always claims sci_ports[0]; presumably the same
 * slot is later reused by the regular probe for that port — confirm the
 * two paths cannot clash for a different dev_id.
 */
static int __init early_console_setup(struct earlycon_device *device,
				      int type)
{
	if (!device->port.membase)
		return -ENODEV;

	/* Route all register I/O through this driver's accessors. */
	device->port.serial_in = sci_serial_in;
	device->port.serial_out	= sci_serial_out;
	device->port.type = type;
	memcpy(&sci_ports[0].port, &device->port, sizeof(struct uart_port));
	port_cfg.type = type;
	sci_ports[0].cfg = &port_cfg;
	sci_ports[0].params = sci_probe_regmap(&port_cfg);
	/* Preserve the boot-time SCSCR contents, just add RE and TE. */
	port_cfg.scscr = sci_serial_in(&sci_ports[0].port, SCSCR);
	sci_serial_out(&sci_ports[0].port, SCSCR,
		       SCSCR_RE | SCSCR_TE | port_cfg.scscr);

	device->con->write = serial_console_write;
	return 0;
}
3475static int __init sci_early_console_setup(struct earlycon_device *device,
3476					  const char *opt)
3477{
3478	return early_console_setup(device, PORT_SCI);
3479}
3480static int __init scif_early_console_setup(struct earlycon_device *device,
3481					  const char *opt)
3482{
3483	return early_console_setup(device, PORT_SCIF);
3484}
3485static int __init rzscifa_early_console_setup(struct earlycon_device *device,
3486					  const char *opt)
3487{
3488	port_cfg.regtype = SCIx_RZ_SCIFA_REGTYPE;
3489	return early_console_setup(device, PORT_SCIF);
3490}
3491static int __init scifa_early_console_setup(struct earlycon_device *device,
3492					  const char *opt)
3493{
3494	return early_console_setup(device, PORT_SCIFA);
3495}
3496static int __init scifb_early_console_setup(struct earlycon_device *device,
3497					  const char *opt)
3498{
3499	return early_console_setup(device, PORT_SCIFB);
3500}
3501static int __init hscif_early_console_setup(struct earlycon_device *device,
3502					  const char *opt)
3503{
3504	return early_console_setup(device, PORT_HSCIF);
3505}
3506
/*
 * Earlycon registrations, matched against the DT compatible string.
 * The r7s9210 entry reuses the "scif" earlycon name but selects the
 * RZ_SCIFA register layout via rzscifa_early_console_setup().
 */
OF_EARLYCON_DECLARE(sci, "renesas,sci", sci_early_console_setup);
OF_EARLYCON_DECLARE(scif, "renesas,scif", scif_early_console_setup);
OF_EARLYCON_DECLARE(scif, "renesas,scif-r7s9210", rzscifa_early_console_setup);
OF_EARLYCON_DECLARE(scifa, "renesas,scifa", scifa_early_console_setup);
OF_EARLYCON_DECLARE(scifb, "renesas,scifb", scifb_early_console_setup);
OF_EARLYCON_DECLARE(hscif, "renesas,hscif", hscif_early_console_setup);
#endif /* CONFIG_SERIAL_SH_SCI_EARLYCON */
3514
/* Standard module boilerplate; the "platform:sh-sci" alias enables autoload. */
module_init(sci_init);
module_exit(sci_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-sci");
MODULE_AUTHOR("Paul Mundt");
MODULE_DESCRIPTION("SuperH (H)SCI(F) serial driver");
3522