// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic TXx9 ACLC platform driver
 *
 * Copyright (C) 2009 Atsushi Nemoto
 *
 * Based on RBTX49xx patch from CELF patch archive.
 * (C) Copyright TOSHIBA CORPORATION 2004-2006
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "txx9aclc.h"

#define DRV_NAME "txx9aclc"

static struct txx9aclc_soc_device {
	struct txx9aclc_dmadata dmadata[2];
} txx9aclc_soc_device;

/* REVISIT: How to find txx9aclc_drvdata from snd_ac97? */
static struct txx9aclc_plat_drvdata *txx9aclc_drvdata;

static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev,
			     struct txx9aclc_dmadata *dmadata);

static const struct snd_pcm_hardware txx9aclc_pcm_hardware = {
	/*
	 * REVISIT: SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID
	 * needs more work for noncoherent MIPS.
	 */
	.info		  = SNDRV_PCM_INFO_INTERLEAVED |
			    SNDRV_PCM_INFO_BATCH |
			    SNDRV_PCM_INFO_PAUSE,
	.period_bytes_min = 1024,
	.period_bytes_max = 8 * 1024,
	.periods_min	  = 2,
	.periods_max	  = 4096,
	.buffer_bytes_max = 32 * 1024,
};

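/*
 * hw_params: the DMA buffer itself is managed by the ALSA core; just
 * remember which substream this DMA state belongs to and reset the
 * hardware position.  The buffer/period geometry is latched later, in
 * prepare().
 */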
static int txx9aclc_pcm_hw_params(struct snd_soc_component *component,
				  struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct txx9aclc_dmadata *dmadata = runtime->private_data;

	dev_dbg(component->dev,
		"runtime->dma_area = %#lx dma_addr = %#lx dma_bytes = %zd "
		"runtime->min_align %ld\n",
		(unsigned long)runtime->dma_area,
		(unsigned long)runtime->dma_addr, runtime->dma_bytes,
		runtime->min_align);
	dev_dbg(component->dev,
		"periods %d period_bytes %d stream %d\n",
		params_periods(params), params_period_bytes(params),
		substream->stream);

	dmadata->substream = substream;
	dmadata->pos = 0;
	return 0;
}

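/*
 * prepare: latch the DMA base address and the buffer/period sizes, then
 * split the buffer into fragments.  Normally one fragment equals one
 * period; if the buffer is a single period, that period is halved so the
 * two-descriptor DMA chain (NR_DMA_CHAIN) can still ping-pong.
 */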
static int txx9aclc_pcm_prepare(struct snd_soc_component *component,
				struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct txx9aclc_dmadata *dmadata = runtime->private_data;

	dmadata->dma_addr = runtime->dma_addr;
	dmadata->buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
	dmadata->period_bytes = snd_pcm_lib_period_bytes(substream);

	if (dmadata->buffer_bytes == dmadata->period_bytes) {
		dmadata->frag_bytes = dmadata->period_bytes >> 1;
		dmadata->frags = 2;
	} else {
		dmadata->frag_bytes = dmadata->period_bytes;
		dmadata->frags = dmadata->buffer_bytes / dmadata->period_bytes;
	}
	dmadata->frag_count = 0;
	dmadata->pos = 0;
	return 0;
}

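/*
 * DMA completion callback.  It runs in the dmaengine callback context,
 * where new descriptors must not be submitted, so it only accounts for
 * the finished fragment and defers the refill to the workqueue.
 */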
static void txx9aclc_dma_complete(void *arg)
{
	struct txx9aclc_dmadata *dmadata = arg;
	unsigned long flags;

	/* dma completion handler cannot submit new operations */
	spin_lock_irqsave(&dmadata->dma_lock, flags);
	if (dmadata->frag_count >= 0) {
		dmadata->dmacount--;
		if (!WARN_ON(dmadata->dmacount < 0))
			queue_work(system_highpri_wq, &dmadata->work);
	}
	spin_unlock_irqrestore(&dmadata->dma_lock, flags);
}

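/*
 * Build a single-entry scatterlist for one fragment and queue it on the
 * slave channel, in the direction matching the stream (memory-to-device
 * for playback, device-to-memory for capture).
 */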
static struct dma_async_tx_descriptor *
txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr)
{
	struct dma_chan *chan = dmadata->dma_chan;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf_dma_addr)),
		    dmadata->frag_bytes, buf_dma_addr & (PAGE_SIZE - 1));
	sg_dma_address(&sg) = buf_dma_addr;
	desc = dmaengine_prep_slave_sg(chan, &sg, 1,
		dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
		DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(&chan->dev->device, "cannot prepare slave dma\n");
		return NULL;
	}
	desc->callback = txx9aclc_dma_complete;
	desc->callback_param = dmadata;
	dmaengine_submit(desc);
	return desc;
}

#define NR_DMA_CHAIN		2

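/*
 * Workqueue handler that keeps NR_DMA_CHAIN fragment descriptors queued on
 * the channel.  A negative frag_count marks a freshly (re)started stream:
 * the chain is primed from the start of the buffer and the ACLC FIFO DMA
 * is enabled.  Otherwise the handler tops the chain back up, advancing the
 * hardware position and signalling period boundaries to ALSA.
 */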
static void txx9aclc_dma_work(struct work_struct *work)
{
	struct txx9aclc_dmadata *dmadata =
		container_of(work, struct txx9aclc_dmadata, work);
	struct dma_chan *chan = dmadata->dma_chan;
	struct dma_async_tx_descriptor *desc;
	struct snd_pcm_substream *substream = dmadata->substream;
	u32 ctlbit = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
		ACCTL_AUDODMA : ACCTL_AUDIDMA;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dmadata->dma_lock, flags);
	if (dmadata->frag_count < 0) {
		struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
		void __iomem *base = drvdata->base;

		spin_unlock_irqrestore(&dmadata->dma_lock, flags);
		dmaengine_terminate_all(chan);
		/* first time */
		for (i = 0; i < NR_DMA_CHAIN; i++) {
			desc = txx9aclc_dma_submit(dmadata,
				dmadata->dma_addr + i * dmadata->frag_bytes);
			if (!desc)
				return;
		}
		dmadata->dmacount = NR_DMA_CHAIN;
		dma_async_issue_pending(chan);
		spin_lock_irqsave(&dmadata->dma_lock, flags);
		__raw_writel(ctlbit, base + ACCTLEN);
		dmadata->frag_count = NR_DMA_CHAIN % dmadata->frags;
		spin_unlock_irqrestore(&dmadata->dma_lock, flags);
		return;
	}
	if (WARN_ON(dmadata->dmacount >= NR_DMA_CHAIN)) {
		spin_unlock_irqrestore(&dmadata->dma_lock, flags);
		return;
	}
	while (dmadata->dmacount < NR_DMA_CHAIN) {
		dmadata->dmacount++;
		spin_unlock_irqrestore(&dmadata->dma_lock, flags);
		desc = txx9aclc_dma_submit(dmadata,
			dmadata->dma_addr +
			dmadata->frag_count * dmadata->frag_bytes);
		if (!desc)
			return;
		dma_async_issue_pending(chan);

		spin_lock_irqsave(&dmadata->dma_lock, flags);
		dmadata->frag_count++;
		dmadata->frag_count %= dmadata->frags;
		dmadata->pos += dmadata->frag_bytes;
		dmadata->pos %= dmadata->buffer_bytes;
		if ((dmadata->frag_count * dmadata->frag_bytes) %
		    dmadata->period_bytes == 0)
			snd_pcm_period_elapsed(substream);
	}
	spin_unlock_irqrestore(&dmadata->dma_lock, flags);
}

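/*
 * trigger: START marks the chain for re-priming and defers the actual
 * descriptor submission to the workqueue; STOP/PAUSE/SUSPEND and their
 * release/resume counterparts toggle the ACLC FIFO DMA enable bit for
 * this stream directly.
 */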
static int txx9aclc_pcm_trigger(struct snd_soc_component *component,
				struct snd_pcm_substream *substream, int cmd)
{
	struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
	struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
	void __iomem *base = drvdata->base;
	unsigned long flags;
	int ret = 0;
	u32 ctlbit = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
		ACCTL_AUDODMA : ACCTL_AUDIDMA;

	spin_lock_irqsave(&dmadata->dma_lock, flags);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		dmadata->frag_count = -1;
		queue_work(system_highpri_wq, &dmadata->work);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		__raw_writel(ctlbit, base + ACCTLDIS);
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		__raw_writel(ctlbit, base + ACCTLEN);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&dmadata->dma_lock, flags);
	return ret;
}

static snd_pcm_uframes_t
txx9aclc_pcm_pointer(struct snd_soc_component *component,
		     struct snd_pcm_substream *substream)
{
	struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;

	return bytes_to_frames(substream->runtime, dmadata->pos);
}

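/*
 * open: attach the per-stream DMA state to the runtime and restrict the
 * period count to integers so the buffer size is always a whole multiple
 * of the period size.
 */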
static int txx9aclc_pcm_open(struct snd_soc_component *component,
			     struct snd_pcm_substream *substream)
{
	struct txx9aclc_soc_device *dev = &txx9aclc_soc_device;
	struct txx9aclc_dmadata *dmadata = &dev->dmadata[substream->stream];
	int ret;

	ret = snd_soc_set_runtime_hwparams(substream, &txx9aclc_pcm_hardware);
	if (ret)
		return ret;
	/* ensure that buffer size is a multiple of period size */
	ret = snd_pcm_hw_constraint_integer(substream->runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		return ret;
	substream->runtime->private_data = dmadata;
	return 0;
}

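/*
 * close: set frag_count negative so the completion callback and work
 * handler stop refilling the chain, then cancel anything still queued on
 * the channel.
 */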
static int txx9aclc_pcm_close(struct snd_soc_component *component,
			      struct snd_pcm_substream *substream)
{
	struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
	struct dma_chan *chan = dmadata->dma_chan;

	dmadata->frag_count = -1;
	dmaengine_terminate_all(chan);
	return 0;
}

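/*
 * pcm_construct: the platform device provides two IORESOURCE_DMA entries
 * (playback first, capture second).  Set up a DMA channel for each and
 * let the ALSA core manage the DMA buffers.
 */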
static int txx9aclc_pcm_new(struct snd_soc_component *component,
			    struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_soc_dai *dai = asoc_rtd_to_cpu(rtd, 0);
	struct snd_pcm *pcm = rtd->pcm;
	struct platform_device *pdev = to_platform_device(component->dev);
	struct txx9aclc_soc_device *dev;
	struct resource *r;
	int i;
	int ret;

	/* at this point onwards the AC97 component has probed and this will be valid */
	dev = snd_soc_dai_get_drvdata(dai);

	dev->dmadata[0].stream = SNDRV_PCM_STREAM_PLAYBACK;
	dev->dmadata[1].stream = SNDRV_PCM_STREAM_CAPTURE;
	for (i = 0; i < 2; i++) {
		r = platform_get_resource(pdev, IORESOURCE_DMA, i);
		if (!r) {
			ret = -EBUSY;
			goto exit;
		}
		dev->dmadata[i].dma_res = r;
		ret = txx9aclc_dma_init(dev, &dev->dmadata[i]);
		if (ret)
			goto exit;
	}

	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
		card->dev, 64 * 1024, 4 * 1024 * 1024);
	return 0;

exit:
	for (i = 0; i < 2; i++) {
		if (dev->dmadata[i].dma_chan)
			dma_release_channel(dev->dmadata[i].dma_chan);
		dev->dmadata[i].dma_chan = NULL;
	}
	return ret;
}

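/*
 * dma_request_channel() filter: match the txx9dmac channel whose device
 * name is "<dma resource name>.<resource start>", and hand it the slave
 * configuration prepared in txx9aclc_dma_init().
 */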
static bool filter(struct dma_chan *chan, void *param)
{
	struct txx9aclc_dmadata *dmadata = param;
	char *devname;
	bool found = false;

	devname = kasprintf(GFP_KERNEL, "%s.%d", dmadata->dma_res->name,
		(int)dmadata->dma_res->start);
	if (strcmp(dev_name(chan->device->dev), devname) == 0) {
		chan->private = &dmadata->dma_slave;
		found = true;
	}
	kfree(devname);
	return found;
}

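/*
 * Program the txx9dmac slave parameters (32-bit accesses to the ACLC
 * audio FIFO data register for the given direction) and grab a matching
 * DMA_SLAVE channel for this stream.
 */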
static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev,
			     struct txx9aclc_dmadata *dmadata)
{
	struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
	struct txx9dmac_slave *ds = &dmadata->dma_slave;
	dma_cap_mask_t mask;

	spin_lock_init(&dmadata->dma_lock);

	ds->reg_width = sizeof(u32);
	if (dmadata->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		ds->tx_reg = drvdata->physbase + ACAUDODAT;
		ds->rx_reg = 0;
	} else {
		ds->tx_reg = 0;
		ds->rx_reg = drvdata->physbase + ACAUDIDAT;
	}

	/* Try to grab a DMA channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dmadata->dma_chan = dma_request_channel(mask, filter, dmadata);
	if (!dmadata->dma_chan) {
		printk(KERN_ERR
			"DMA channel for %s is not available\n",
			dmadata->stream == SNDRV_PCM_STREAM_PLAYBACK ?
			"playback" : "capture");
		return -EBUSY;
	}
	INIT_WORK(&dmadata->work, txx9aclc_dma_work);
	return 0;
}

static int txx9aclc_pcm_probe(struct snd_soc_component *component)
{
	snd_soc_component_set_drvdata(component, &txx9aclc_soc_device);
	return 0;
}

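/*
 * Component remove: disable both FIFO DMAs, clear any pending DMAREQ with
 * a dummy read/write, and release the DMA channels.
 */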
static void txx9aclc_pcm_remove(struct snd_soc_component *component)
{
	struct txx9aclc_soc_device *dev = snd_soc_component_get_drvdata(component);
	struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
	void __iomem *base = drvdata->base;
	int i;

	/* disable all FIFO DMAs */
	__raw_writel(ACCTL_AUDODMA | ACCTL_AUDIDMA, base + ACCTLDIS);
	/* dummy R/W to clear pending DMAREQ if any */
	__raw_writel(__raw_readl(base + ACAUDIDAT), base + ACAUDODAT);

	for (i = 0; i < 2; i++) {
		struct txx9aclc_dmadata *dmadata = &dev->dmadata[i];
		struct dma_chan *chan = dmadata->dma_chan;

		if (chan) {
			dmadata->frag_count = -1;
			dmaengine_terminate_all(chan);
			dma_release_channel(chan);
		}
		dev->dmadata[i].dma_chan = NULL;
	}
}

static const struct snd_soc_component_driver txx9aclc_soc_component = {
	.name		= DRV_NAME,
	.probe		= txx9aclc_pcm_probe,
	.remove		= txx9aclc_pcm_remove,
	.open		= txx9aclc_pcm_open,
	.close		= txx9aclc_pcm_close,
	.hw_params	= txx9aclc_pcm_hw_params,
	.prepare	= txx9aclc_pcm_prepare,
	.trigger	= txx9aclc_pcm_trigger,
	.pointer	= txx9aclc_pcm_pointer,
	.pcm_construct	= txx9aclc_pcm_new,
};

static int txx9aclc_soc_platform_probe(struct platform_device *pdev)
{
	return devm_snd_soc_register_component(&pdev->dev,
					&txx9aclc_soc_component, NULL, 0);
}

static struct platform_driver txx9aclc_pcm_driver = {
	.driver = {
			.name = "txx9aclc-pcm-audio",
	},
	.probe = txx9aclc_soc_platform_probe,
};

module_platform_driver(txx9aclc_pcm_driver);

MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("TXx9 ACLC Audio DMA driver");
MODULE_LICENSE("GPL");