// SPDX-License-Identifier: GPL-2.0-only
/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 */

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>

#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"

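/*
 * No-op callbacks. ssi_port_remove() swaps these in for the real port
 * operations so that late callers fail gracefully instead of calling
 * into a dead port.
 */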
static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	return gpiod_get_value(omap_port->wake_gpio);
}

#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}

static int ssi_port_regs_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem	*base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
				readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
				readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
				readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
				readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
				readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(ssi_port_regs);

static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > 127)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

static int ssi_debug_add_port(struct omap_ssi_port *omap_port,
				     struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	/* debugfs_create_dir() returns an ERR_PTR on failure, never NULL */
	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	omap_port->dir = dir;
	debugfs_create_file("regs", 0444, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	debugfs_create_file_unsafe("divisor", 0644, dir, port,
				   &ssi_sst_div_fops);

	return 0;
}
#endif

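/*
 * Deferred completion of the messages parked on the port error queue.
 * Runs from the errqueue_work delayed work item, so the completion
 * callbacks are invoked in process context.
 */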
static void ssi_process_errqueue(struct work_struct *work)
{
	struct omap_ssi_port *omap_port;
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work);

	list_for_each_safe(head, tmp, &omap_port->errqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		msg->complete(msg);
		list_del(head);
	}
}

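/*
 * Claim a free GDD logical channel for @msg. Returns the channel index,
 * or -EBUSY when all SSI_MAX_GDD_LCH channels are already in flight.
 */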
static int ssi_claim_lch(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) {
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
			omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}
	}

	return -EBUSY;
}

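/*
 * Program GDD logical channel @lch for a single-sg DMA transfer: map
 * the buffer, set up source/destination addresses, burst and sync
 * parameters, enable the per-channel interrupt and finally kick the
 * channel by writing CCR.
 */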
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	/* Hold clocks during the transfer */
	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_READ) {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_FROM_DEVICE);
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed!\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return -EIO;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
					SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_TO_DEVICE);
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed!\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return -EIO;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
					SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
						gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

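/*
 * Start a PIO transfer by arming the per-channel DATAACCEPT (write) or
 * DATAAVAILABLE (read) interrupt; the actual word-by-word copy is done
 * later in ssi_pio_complete(). Writes keep an extra clock reference
 * until the transfer completes.
 */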
static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
						msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_autosuspend(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

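/*
 * Start the first queued message on @queue. DMA is only attempted for
 * transfers larger than one 32-bit word, and only when a free GDD
 * channel can be claimed; everything else falls back to PIO. Called
 * with the port lock held.
 */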
static int ssi_start_transfer(struct list_head *queue)
{
	struct hsi_msg *msg;
	int lch = -1;

	if (list_empty(queue))
		return 0;
	msg = list_first_entry(queue, struct hsi_msg, link);
	if (msg->status != HSI_STATUS_QUEUED)
		return 0;
	if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
		lch = ssi_claim_lch(msg);
	if (lch >= 0)
		return ssi_start_dma(msg, lch);
	else
		return ssi_start_pio(msg);
}

static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

static int ssi_async(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct list_head *queue;
	int err = 0;

	if (msg->sgt.nents > 1)
		return -ENOSYS; /* TODO: Add sg support */

	if (msg->break_frame)
		return ssi_async_break(msg);

	if (msg->ttype) {
		BUG_ON(msg->channel >= omap_port->sst.channels);
		queue = &omap_port->txqueue[msg->channel];
	} else {
		BUG_ON(msg->channel >= omap_port->ssr.channels);
		queue = &omap_port->rxqueue[msg->channel];
	}
	msg->status = HSI_STATUS_QUEUED;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	list_add_tail(&msg->link, queue);
	err = ssi_start_transfer(queue);
	if (err < 0) {
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
				msg->status, msg->ttype, msg->channel);

	return err;
}

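/*
 * Pick the TX divisor for the requested max_speed. The TX clock is
 * fck/2; decrementing it before the division makes the result round
 * down on exact multiples (see the comments below), so the selected
 * divisor never yields a rate above max_speed.
 */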
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* / 2 : SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu kHz speed %d kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
		omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}

static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			msg->channel, msg, msg->sgt.sgl->length,
					msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}

static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d kb/s (div %d)\n",
						cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registers for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);

	/* stop all ssi communication */
	pinctrl_pm_select_idle_state(omap_port->pdev);
	udelay(1); /* wait for racing frames */

	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_autosuspend(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_autosuspend(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);

	/* Resume SSI communication */
	pinctrl_pm_select_default_state(omap_port->pdev);

	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

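/*
 * Asserting the wake line needs the controller clocks, and
 * pm_runtime_get_sync() may sleep, so ssi_start_tx() defers the actual
 * wake assertion to this work item.
 */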
static void start_tx_work(struct work_struct *work)
{
	struct omap_ssi_port *omap_port =
				container_of(work, struct omap_ssi_port, work);
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
}

static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	spin_unlock_bh(&omap_port->wk_lock);

	schedule_work(&omap_port->work);

	return 0;
}

static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */

	return 0;
}

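/*
 * (Re)start transfers on @queue: keep popping and completing messages
 * with an error status while the start attempt fails, until a transfer
 * actually starts or the queue drains.
 */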
static void ssi_transfer(struct omap_ssi_port *omap_port,
							struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	pm_runtime_get(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
}

static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release clock references held for writes, including GDD ones */
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
		SSI_MPU_STATUS_REG(port->num, 0));
}

static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

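/*
 * Apply @mode to both the transmitter and the receiver. The final
 * read-back acts as an OCP barrier, making sure the posted writes have
 * reached the module.
 */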
static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_put_sync(omap_port->pdev);
		pm_runtime_get(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* ACK error */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error to all pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}

static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}
}

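/*
 * PIO interrupt path: exactly one 32-bit word is moved per interrupt.
 * Completed writes get one extra interrupt round trip, so the complete
 * callback only runs after the last frame has really been sent.
 */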
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock_bh(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
							msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
				((msg->status == HSI_STATUS_COMPLETED) &&
					(msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock_bh(&omap_port->lock);

			return;
		}
	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_mark_last_busy(omap_port->pdev);
		pm_runtime_put_autosuspend(omap_port->pdev);
	}
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock_bh(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}

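/*
 * Threaded handler for the port interrupt: dispatches data, break and
 * error events, then re-reads the enabled status bits and loops until
 * none are left pending.
 */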
static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);

	do {
		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		for (ch = 0; ch < omap_port->channels; ch++) {
			if (status_reg & SSI_DATAACCEPT(ch))
				ssi_pio_complete(port, &omap_port->txqueue[ch]);
			if (status_reg & SSI_DATAAVAILABLE(ch))
				ssi_pio_complete(port, &omap_port->rxqueue[ch]);
		}
		if (status_reg & SSI_BREAKDETECTED)
			ssi_break_complete(port);
		if (status_reg & SSI_ERROROCCURED)
			ssi_error(port);

		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		/* TODO: sleep if we retry? */
	} while (status_reg);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return IRQ_HANDLED;
}

static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/*
		 * We can have a quick high-low-high transition on the line.
		 * In such a case, if we have long interrupt latencies,
		 * we can miss the low event or see the high event twice.
		 * This workaround avoids breaking the clock reference
		 * count when such a situation occurs.
		 */
		if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_get_sync(omap_port->pdev);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
	}

	return IRQ_HANDLED;
}

static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int err;

	err = platform_get_irq(pd, 0);
	if (err < 0)
		return err;
	omap_port->irq = err;
	err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL,
				ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port);
	if (err < 0)
		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
							omap_port->irq, err);
	return err;
}

static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int cawake_irq;
	int err;

	if (!omap_port->wake_gpio) {
		omap_port->wake_irq = -1;
		return 0;
	}

	cawake_irq = gpiod_to_irq(omap_port->wake_gpio);
	omap_port->wake_irq = cawake_irq;

	err = devm_request_threaded_irq(&port->device, cawake_irq, NULL,
		ssi_wake_thread,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
		"SSI cawake", port);
	if (err < 0) {
		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
						cawake_irq, err);
		return err;
	}
	err = enable_irq_wake(cawake_irq);
	if (err < 0)
		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
			cawake_irq, err);

	return err;
}

static void ssi_queues_init(struct omap_ssi_port *omap_port)
{
	unsigned int ch;

	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
		INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
	}
	INIT_LIST_HEAD(&omap_port->brkqueue);
}

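/*
 * Look up the named MEM resource on @pd, request and ioremap it, and
 * optionally return its physical start address for DMA programming.
 */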
static int ssi_port_get_iomem(struct platform_device *pd,
		const char *name, void __iomem **pbase, dma_addr_t *phy)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct resource *mem;
	struct resource *ioarea;
	void __iomem *base;

	mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
	if (!mem) {
		dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
		return -ENXIO;
	}
	ioarea = devm_request_mem_region(&port->device, mem->start,
					resource_size(mem), dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "%s IO memory region request failed\n",
								mem->name);
		return -ENXIO;
	}
	base = devm_ioremap(&port->device, mem->start, resource_size(mem));
	if (!base) {
		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
		return -ENXIO;
	}
	*pbase = base;

	if (phy)
		*phy = mem->start;

	return 0;
}

static int ssi_port_probe(struct platform_device *pd)
{
	struct device_node *np = pd->dev.of_node;
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct gpio_desc *cawake_gpio = NULL;
	u32 port_id;
	int err;

	dev_dbg(&pd->dev, "init ssi port...\n");

	if (!ssi->port || !omap_ssi->port) {
		dev_err(&pd->dev, "ssi controller not initialized!\n");
		err = -ENODEV;
		goto error;
	}

	/* get id of first uninitialized port in controller */
	for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
		port_id++)
		;

	if (port_id >= ssi->num_ports) {
		dev_err(&pd->dev, "port id out of range!\n");
		err = -ENODEV;
		goto error;
	}

	port = ssi->port[port_id];

	if (!np) {
		dev_err(&pd->dev, "missing device tree data\n");
		err = -EINVAL;
		goto error;
	}

	cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
	if (IS_ERR(cawake_gpio)) {
		err = PTR_ERR(cawake_gpio);
		dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err);
		goto error;
	}

	omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
	if (!omap_port) {
		err = -ENOMEM;
		goto error;
	}
	omap_port->wake_gpio = cawake_gpio;
	omap_port->pdev = &pd->dev;
	omap_port->port_id = port_id;

	INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue);
	INIT_WORK(&omap_port->work, start_tx_work);

	/* initialize HSI port */
	port->async	= ssi_async;
	port->setup	= ssi_setup;
	port->flush	= ssi_flush;
	port->start_tx	= ssi_start_tx;
	port->stop_tx	= ssi_stop_tx;
	port->release	= ssi_release;
	hsi_port_set_drvdata(port, omap_port);
	omap_ssi->port[port_id] = omap_port;

	platform_set_drvdata(pd, port);

	err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
		&omap_port->sst_dma);
	if (err < 0)
		goto error;
	err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
		&omap_port->ssr_dma);
	if (err < 0)
		goto error;

	err = ssi_port_irq(port, pd);
	if (err < 0)
		goto error;
	err = ssi_wake_irq(port, pd);
	if (err < 0)
		goto error;

	ssi_queues_init(omap_port);
	spin_lock_init(&omap_port->lock);
	spin_lock_init(&omap_port->wk_lock);
	omap_port->dev = &port->device;

	pm_runtime_use_autosuspend(omap_port->pdev);
	pm_runtime_set_autosuspend_delay(omap_port->pdev, 250);
	pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
	err = ssi_debug_add_port(omap_port, omap_ssi->dir);
	if (err < 0) {
		pm_runtime_disable(omap_port->pdev);
		goto error;
	}
#endif

	hsi_add_clients_from_dt(port, np);

	dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id);

	return 0;

error:
	return err;
}

static int ssi_port_remove(struct platform_device *pd)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_remove_port(port);
#endif

	cancel_delayed_work_sync(&omap_port->errqueue_work);

	hsi_port_unregister_clients(port);

	port->async	= hsi_dummy_msg;
	port->setup	= hsi_dummy_cl;
	port->flush	= hsi_dummy_cl;
	port->start_tx	= hsi_dummy_cl;
	port->stop_tx	= hsi_dummy_cl;
	port->release	= hsi_dummy_cl;

	omap_ssi->port[omap_port->port_id] = NULL;
	platform_set_drvdata(pd, NULL);

	pm_runtime_dont_use_autosuspend(&pd->dev);
	pm_runtime_disable(&pd->dev);

	return 0;
}

static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
	writel_relaxed(omap_port->sst.divisor,
				omap_port->sst_base + SSI_SST_DIVISOR_REG);

	return 0;
}

void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
			       struct omap_ssi_port *omap_port)
{
	/* update divisor */
	u32 div = ssi_calculate_div(ssi);

	omap_port->sst.divisor = div;
	ssi_restore_divisor(omap_port);
}

#ifdef CONFIG_PM
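/*
 * Runtime PM context handling: only the MPU interrupt enable state
 * needs an explicit save; everything else is restored from the shadow
 * register values kept in struct omap_ssi_port.
 */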
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	omap_port->sys_mpu_enable = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));

	return 0;
}

static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem	*base;

	writel_relaxed(omap_port->sys_mpu_enable,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

	/* SST context */
	base = omap_port->sst_base;
	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

	/* SSR context */
	base = omap_port->ssr_base;
	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

	return 0;
}

static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
	u32 mode;

	writel_relaxed(omap_port->sst.mode,
				omap_port->sst_base + SSI_SST_MODE_REG);
	writel_relaxed(omap_port->ssr.mode,
				omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int omap_ssi_port_runtime_suspend(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime suspend!\n");

	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
	if (omap_ssi->get_loss)
		omap_port->loss_count =
				omap_ssi->get_loss(ssi->device.parent);
	ssi_save_port_ctx(omap_port);

	return 0;
}

static int omap_ssi_port_runtime_resume(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime resume!\n");

	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
				omap_ssi->get_loss(ssi->device.parent)))
		goto mode; /* We always need to restore the mode & TX divisor */

	ssi_restore_port_ctx(omap_port);

mode:
	ssi_restore_divisor(omap_port);
	ssi_restore_port_mode(omap_port);

	return 0;
}

static const struct dev_pm_ops omap_ssi_port_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
		omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS     (&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS     NULL
#endif

#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
	{ .compatible = "ti,omap3-ssi-port", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif

struct platform_driver ssi_port_pdriver = {
	.probe = ssi_port_probe,
	.remove	= ssi_port_remove,
	.driver	= {
		.name	= "omap_ssi_port",
		.of_match_table = omap_ssi_port_of_match,
		.pm	= DEV_PM_OPS,
	},
};