// SPDX-License-Identifier: GPL-2.0-only
/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 */

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>

#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"

static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	return gpiod_get_value(omap_port->wake_gpio);
}

#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}

static int ssi_port_regs_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem	*base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
				readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
				readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
				readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
				readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
				readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(ssi_port_regs);

static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > 127)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

static void ssi_debug_add_port(struct omap_ssi_port *omap_port,
				     struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	omap_port->dir = dir;
	debugfs_create_file("regs", 0444, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	debugfs_create_file_unsafe("divisor", 0644, dir, port,
				   &ssi_sst_div_fops);
}
#endif

static void ssi_process_errqueue(struct work_struct *work)
{
	struct omap_ssi_port *omap_port;
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work);

	list_for_each_safe(head, tmp, &omap_port->errqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		msg->complete(msg);
		list_del(head);
	}
}

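/*
 * Claim a free GDD (SSI DMA) logical channel for @msg. Returns the
 * channel number, or -EBUSY when all SSI_MAX_GDD_LCH channels are
 * already carrying a transfer.
 */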
static int ssi_claim_lch(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
			omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}

	return -EBUSY;
}

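/*
 * Program GDD logical channel @lch for @msg: map the scatterlist and
 * set up source/destination addresses, burst mode and the hardware
 * sync source before enabling the channel. The sync values encoded
 * into CCR below select the per-port, per-channel SSI request lines
 * (the exact mapping is assumed to follow the OMAP TRM).
 */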
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	/* Hold clocks during the transfer */
	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_READ) {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_FROM_DEVICE);
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return -EIO;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
					SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_TO_DEVICE);
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return -EIO;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
					SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
						gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
						msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_autosuspend(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

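/*
 * Start the first queued message on @queue: transfers longer than a
 * single 32-bit word are attempted via GDD DMA when a logical channel
 * is free; everything else falls back to interrupt-driven PIO.
 */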
static int ssi_start_transfer(struct list_head *queue)
{
	struct hsi_msg *msg;
	int lch = -1;

	if (list_empty(queue))
		return 0;
	msg = list_first_entry(queue, struct hsi_msg, link);
	if (msg->status != HSI_STATUS_QUEUED)
		return 0;
	if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
		lch = ssi_claim_lch(msg);
	if (lch >= 0)
		return ssi_start_dma(msg, lch);
	else
		return ssi_start_pio(msg);
}

static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

static int ssi_async(struct hsi_msg *msg)
{
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct list_head *queue;
	int err = 0;

	BUG_ON(!msg);

	port = hsi_get_port(msg->cl);
	omap_port = hsi_port_drvdata(port);

	if (msg->sgt.nents > 1)
		return -ENOSYS; /* TODO: Add sg support */

	if (msg->break_frame)
		return ssi_async_break(msg);

	if (msg->ttype) {
		BUG_ON(msg->channel >= omap_port->sst.channels);
		queue = &omap_port->txqueue[msg->channel];
	} else {
		BUG_ON(msg->channel >= omap_port->ssr.channels);
		queue = &omap_port->rxqueue[msg->channel];
	}
	msg->status = HSI_STATUS_QUEUED;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	list_add_tail(&msg->link, queue);
	err = ssi_start_transfer(queue);
	if (err < 0) {
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
				msg->status, msg->ttype, msg->channel);

	return err;
}

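/*
 * The divisor below is (fck/2 - 1) / max_speed, i.e. the largest value
 * that keeps the TX bit rate at or below max_speed, assuming the usual
 * "rate = (fck/2) / (divisor + 1)" register encoding. Illustrative
 * numbers (not taken from real hardware): a 96000 kHz functional clock
 * and max_speed = 24000 kb/s give (48000 - 1) / 24000 = divisor 1,
 * i.e. a 24000 kb/s TX clock.
 */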
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* / 2 : SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu kHz speed %d kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
		omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}

static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			msg->channel, msg, msg->sgt.sgl->length,
					msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}

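/*
 * Apply a client configuration: both directions are parked in SLEEP
 * mode while the registers are rewritten, and every programmed value
 * is mirrored into the omap_port->sst/ssr shadow structs so it can be
 * replayed after a context loss (see the runtime PM hooks below). The
 * fixed FRAMESIZE value of 31 presumably encodes the maximum 32-bit
 * frame length as "size - 1".
 */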
static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
						cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registering for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

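/*
 * Flush everything for the client's port: the pins are parked in
 * their idle state so that no incoming frames race the cleanup,
 * in-flight GDD transfers are cancelled, buffers, errors, breaks and
 * interrupts are cleared, and all queued messages are dropped through
 * their destructors.
 */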
static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);

	/* stop all ssi communication */
	pinctrl_pm_select_idle_state(omap_port->pdev);
	udelay(1); /* wait for racing frames */

	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_autosuspend(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_autosuspend(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);

	/* Resume SSI communication */
	pinctrl_pm_select_default_state(omap_port->pdev);

	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

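/*
 * Raising the outgoing wake line is deferred to a work item:
 * grabbing the clocks uses pm_runtime_get_sync(), which may sleep,
 * while ssi_start_tx() itself is presumably allowed to be called
 * from atomic context.
 */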
static void start_tx_work(struct work_struct *work)
{
	struct omap_ssi_port *omap_port =
				container_of(work, struct omap_ssi_port, work);
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
}

static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	spin_unlock_bh(&omap_port->wk_lock);

	schedule_work(&omap_port->work);

	return 0;
}

static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */

	return 0;
}

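/*
 * (Re)start transfers on @queue. Whenever a start attempt fails, the
 * head message is completed with HSI_STATUS_ERROR and the next one is
 * tried, so a single bad message cannot stall the rest of the queue.
 */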
static void ssi_transfer(struct omap_ssi_port *omap_port,
							struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	pm_runtime_get(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
}

static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release the clock references for writes, GDD ones included */
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
		SSI_MPU_STATUS_REG(port->num, 0));
}

static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_put_sync(omap_port->pdev);
		pm_runtime_get(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* Get the error condition before acking it */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error to all current pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}

static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}
}

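/*
 * PIO completion: each DATAACCEPT/DATAAVAILABLE interrupt moves one
 * 32-bit word between the message buffer and the channel FIFO. For
 * writes, the interrupt stays armed for one extra round after the
 * last word, so the complete callback only runs once the final frame
 * has actually left the FIFO.
 */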
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock_bh(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
							msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
				((msg->status == HSI_STATUS_COMPLETED) &&
					(msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock_bh(&omap_port->lock);

			return;
		}
	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_mark_last_busy(omap_port->pdev);
		pm_runtime_put_autosuspend(omap_port->pdev);
	}
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock_bh(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}

static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);

	do {
		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		for (ch = 0; ch < omap_port->channels; ch++) {
			if (status_reg & SSI_DATAACCEPT(ch))
				ssi_pio_complete(port, &omap_port->txqueue[ch]);
			if (status_reg & SSI_DATAAVAILABLE(ch))
				ssi_pio_complete(port, &omap_port->rxqueue[ch]);
		}
		if (status_reg & SSI_BREAKDETECTED)
			ssi_break_complete(port);
		if (status_reg & SSI_ERROROCCURED)
			ssi_error(port);

		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		/* TODO: sleep if we retry? */
	} while (status_reg);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return IRQ_HANDLED;
}

static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/*
		 * We can have a quick high-low-high transition on the line.
		 * In such a case, if interrupt latencies are long, we can
		 * miss the low event or see the high event twice. This
		 * workaround avoids breaking the clock reference count
		 * when that occurs.
		 */
		if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_get_sync(omap_port->pdev);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
	}

	return IRQ_HANDLED;
}

static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int err;

	err = platform_get_irq(pd, 0);
	if (err < 0)
		return err;
	omap_port->irq = err;
	err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL,
				ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port);
	if (err < 0)
		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
							omap_port->irq, err);
	return err;
}

static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int cawake_irq;
	int err;

	if (!omap_port->wake_gpio) {
		omap_port->wake_irq = -1;
		return 0;
	}

	cawake_irq = gpiod_to_irq(omap_port->wake_gpio);
	omap_port->wake_irq = cawake_irq;

	err = devm_request_threaded_irq(&port->device, cawake_irq, NULL,
		ssi_wake_thread,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
		"SSI cawake", port);
	if (err < 0) {
		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
						cawake_irq, err);
		return err;
	}

	err = enable_irq_wake(cawake_irq);
	if (err < 0)
		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
			cawake_irq, err);

	return err;
}

static void ssi_queues_init(struct omap_ssi_port *omap_port)
{
	unsigned int ch;

	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
		INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
	}
	INIT_LIST_HEAD(&omap_port->brkqueue);
}

static int ssi_port_get_iomem(struct platform_device *pd,
		const char *name, void __iomem **pbase, dma_addr_t *phy)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct resource *mem;
	struct resource *ioarea;
	void __iomem *base;

	mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
	if (!mem) {
		dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
		return -ENXIO;
	}
	ioarea = devm_request_mem_region(&port->device, mem->start,
					resource_size(mem), dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "%s IO memory region request failed\n",
								mem->name);
		return -ENXIO;
	}
	base = devm_ioremap(&port->device, mem->start, resource_size(mem));
	if (!base) {
		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
		return -ENXIO;
	}
	*pbase = base;

	if (phy)
		*phy = mem->start;

	return 0;
}

static int ssi_port_probe(struct platform_device *pd)
{
	struct device_node *np = pd->dev.of_node;
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct gpio_desc *cawake_gpio = NULL;
	u32 port_id;
	int err;

	dev_dbg(&pd->dev, "init ssi port...\n");

	if (!ssi->port || !omap_ssi->port) {
		dev_err(&pd->dev, "ssi controller not initialized!\n");
		err = -ENODEV;
		goto error;
	}

	/* get id of first uninitialized port in controller */
	for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
		port_id++)
		;

	if (port_id >= ssi->num_ports) {
		dev_err(&pd->dev, "port id out of range!\n");
		err = -ENODEV;
		goto error;
	}

	port = ssi->port[port_id];

	if (!np) {
		dev_err(&pd->dev, "missing device tree data\n");
		err = -EINVAL;
		goto error;
	}

	cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
	if (IS_ERR(cawake_gpio)) {
		err = PTR_ERR(cawake_gpio);
		dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err);
		goto error;
	}

	omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
	if (!omap_port) {
		err = -ENOMEM;
		goto error;
	}
	omap_port->wake_gpio = cawake_gpio;
	omap_port->pdev = &pd->dev;
	omap_port->port_id = port_id;

	INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue);
	INIT_WORK(&omap_port->work, start_tx_work);

	/* initialize HSI port */
	port->async	= ssi_async;
	port->setup	= ssi_setup;
	port->flush	= ssi_flush;
	port->start_tx	= ssi_start_tx;
	port->stop_tx	= ssi_stop_tx;
	port->release	= ssi_release;
	hsi_port_set_drvdata(port, omap_port);
	omap_ssi->port[port_id] = omap_port;

	platform_set_drvdata(pd, port);

	err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
		&omap_port->sst_dma);
	if (err < 0)
		goto error;
	err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
		&omap_port->ssr_dma);
	if (err < 0)
		goto error;

	err = ssi_port_irq(port, pd);
	if (err < 0)
		goto error;
	err = ssi_wake_irq(port, pd);
	if (err < 0)
		goto error;

	ssi_queues_init(omap_port);
	spin_lock_init(&omap_port->lock);
	spin_lock_init(&omap_port->wk_lock);
	omap_port->dev = &port->device;

	pm_runtime_use_autosuspend(omap_port->pdev);
	pm_runtime_set_autosuspend_delay(omap_port->pdev, 250);
	pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_add_port(omap_port, omap_ssi->dir);
#endif

	hsi_add_clients_from_dt(port, np);

	dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id);

	return 0;

error:
	return err;
}

static int ssi_port_remove(struct platform_device *pd)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_remove_port(port);
#endif

	cancel_delayed_work_sync(&omap_port->errqueue_work);

	hsi_port_unregister_clients(port);

	port->async	= hsi_dummy_msg;
	port->setup	= hsi_dummy_cl;
	port->flush	= hsi_dummy_cl;
	port->start_tx	= hsi_dummy_cl;
	port->stop_tx	= hsi_dummy_cl;
	port->release	= hsi_dummy_cl;

	omap_ssi->port[omap_port->port_id] = NULL;
	platform_set_drvdata(pd, NULL);

	pm_runtime_dont_use_autosuspend(&pd->dev);
	pm_runtime_disable(&pd->dev);

	return 0;
}

static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
	writel_relaxed(omap_port->sst.divisor,
				omap_port->sst_base + SSI_SST_DIVISOR_REG);

	return 0;
}

void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
			       struct omap_ssi_port *omap_port)
{
	/* update divisor */
	u32 div = ssi_calculate_div(ssi);

	omap_port->sst.divisor = div;
	ssi_restore_divisor(omap_port);
}

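/*
 * Runtime PM context handling: the MPU interrupt enable mask and the
 * SST/SSR configuration are saved on suspend and replayed on resume,
 * but only when the context-loss counter from the SSI core indicates
 * the hardware really lost state. The port mode and TX divisor are
 * always restored.
 */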
#ifdef CONFIG_PM
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	omap_port->sys_mpu_enable = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));

	return 0;
}

static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem	*base;

	writel_relaxed(omap_port->sys_mpu_enable,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

	/* SST context */
	base = omap_port->sst_base;
	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

	/* SSR context */
	base = omap_port->ssr_base;
	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

	return 0;
}

static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
	u32 mode;

	writel_relaxed(omap_port->sst.mode,
				omap_port->sst_base + SSI_SST_MODE_REG);
	writel_relaxed(omap_port->ssr.mode,
				omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int omap_ssi_port_runtime_suspend(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime suspend!\n");

	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
	if (omap_ssi->get_loss)
		omap_port->loss_count =
				omap_ssi->get_loss(ssi->device.parent);
	ssi_save_port_ctx(omap_port);

	return 0;
}

static int omap_ssi_port_runtime_resume(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime resume!\n");

	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
				omap_ssi->get_loss(ssi->device.parent)))
		goto mode; /* We always need to restore the mode & TX divisor */

	ssi_restore_port_ctx(omap_port);

mode:
	ssi_restore_divisor(omap_port);
	ssi_restore_port_mode(omap_port);

	return 0;
}

static const struct dev_pm_ops omap_ssi_port_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
		omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS     (&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS     NULL
#endif

#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
	{ .compatible = "ti,omap3-ssi-port", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif

struct platform_driver ssi_port_pdriver = {
	.probe = ssi_port_probe,
	.remove	= ssi_port_remove,
	.driver	= {
		.name	= "omap_ssi_port",
		.of_match_table = omap_ssi_port_of_match,
		.pm	= DEV_PM_OPS,
	},
};