// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2015 Robert Jarzmik <robert.jarzmik@free.fr>
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/wait.h>
#include <linux/dma/pxa-dma.h>

#include "dmaengine.h"
#include "virt-dma.h"

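/*
 * Controller register map: one 32-bit DCSR control/status register per
 * channel at stride 4, and a 16-byte per-channel descriptor window
 * (DDADR, DSADR, DTADR, DCMD) starting at 0x0200. DALGN and DINT are
 * global; DALGN(n) ignores its argument so that the phy_readl/phy_writel
 * helpers below can address every register the same way.
 */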
#define DCSR(n)		(0x0000 + ((n) << 2))
#define DALGN(n)	0x00a0
#define DINT		0x00f0
#define DDADR(n)	(0x0200 + ((n) << 4))
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD(n)		(0x020c + ((n) << 4))

#define PXA_DCSR_RUN		BIT(31)	/* Run Bit (read / write) */
#define PXA_DCSR_NODESC		BIT(30)	/* No-Descriptor Fetch (read / write) */
#define PXA_DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (R/W) */
#define PXA_DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define PXA_DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define PXA_DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define PXA_DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define PXA_DCSR_BUSERR		BIT(0)	/* Bus Error Interrupt (read / write) */

#define PXA_DCSR_EORIRQEN	BIT(28)	/* End of Receive IRQ Enable (R/W) */
#define PXA_DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define PXA_DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define PXA_DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define PXA_DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define PXA_DCSR_CMPST		BIT(10)	/* The Descriptor Compare Status */
#define PXA_DCSR_EORINTR	BIT(9)	/* The end of Receive */

#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)	/* Stop (read / write) */

#define PXA_DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define PXA_DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define PXA_DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define PXA_DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define PXA_DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define PXA_DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define PXA_DCMD_ENDIAN		BIT(18)	/* Device Endian-ness. */
#define PXA_DCMD_BURST8		(1 << 16)	/* 8 byte burst */
#define PXA_DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define PXA_DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define PXA_DCMD_WIDTH1		(1 << 14)	/* 1 byte width */
#define PXA_DCMD_WIDTH2		(2 << 14)	/* 2 byte width (HalfWord) */
#define PXA_DCMD_WIDTH4		(3 << 14)	/* 4 byte width (Word) */
#define PXA_DCMD_LENGTH		0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	(PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1))
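/*
 * PDMA_ALIGNMENT of 3 means a transfer counts as aligned when both
 * addresses sit on an 8-byte (1 << 3) boundary. PDMA_MAX_DESC_BYTES
 * rounds the 8191-byte DCMD length limit down to that boundary, giving
 * 0x1ff8 bytes per hardware descriptor.
 */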

struct pxad_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(16);

struct pxad_desc_sw {
	struct virt_dma_desc	vd;		/* Virtual descriptor */
	int			nb_desc;	/* Number of hw. descriptors */
	size_t			len;		/* Number of bytes xfered */
	dma_addr_t		first;		/* First descriptor's addr */

	/* At least one descriptor has a src/dst address not a multiple of 8 */
	bool			misaligned;
	bool			cyclic;
	struct dma_pool		*desc_pool;	/* Channel's used allocator */

	struct pxad_desc_hw	*hw_desc[];	/* DMA coherent descriptors */
};

struct pxad_phy {
	int			idx;
	void __iomem		*base;
	struct pxad_chan	*vchan;
};

struct pxad_chan {
	struct virt_dma_chan	vc;		/* Virtual channel */
	u32			drcmr;		/* Requestor of the channel */
	enum pxad_chan_prio	prio;		/* Required priority of phy */
	/*
	 * At least one desc_sw among the submitted or issued transfers on
	 * this channel has an address with addr % 8 != 0, which requires
	 * the DALGN bit to be set on the phy.
	 */
	bool			misaligned;
	struct dma_slave_config	cfg;		/* Runtime config */

	/* protected by vc->lock */
	struct pxad_phy		*phy;
	struct dma_pool		*desc_pool;	/* Descriptors pool */
	dma_cookie_t		bus_error;

	wait_queue_head_t	wq_state;
};

struct pxad_device {
	struct dma_device		slave;
	int				nr_chans;
	int				nr_requestors;
	void __iomem			*base;
	struct pxad_phy			*phys;
	spinlock_t			phy_lock;	/* Phy association */
#ifdef CONFIG_DEBUG_FS
	struct dentry			*dbgfs_root;
	struct dentry			**dbgfs_chan;
#endif
};

#define tx_to_pxad_desc(tx)					\
	container_of(tx, struct pxad_desc_sw, async_tx)
#define to_pxad_chan(dchan)					\
	container_of(dchan, struct pxad_chan, vc.chan)
#define to_pxad_dev(dmadev)					\
	container_of(dmadev, struct pxad_device, slave)
#define to_pxad_sw_desc(_vd)				\
	container_of((_vd), struct pxad_desc_sw, vd)

#define _phy_readl_relaxed(phy, _reg)					\
	readl_relaxed((phy)->base + _reg((phy)->idx))
#define phy_readl_relaxed(phy, _reg)					\
	({								\
		u32 _v;							\
		_v = readl_relaxed((phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): readl(%s): 0x%08x\n", __func__, #_reg,	\
			  _v);						\
		_v;							\
	})
#define phy_writel(phy, val, _reg)					\
	do {								\
		writel((val), (phy)->base + _reg((phy)->idx));		\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel(0x%08x, %s)\n",			\
			 __func__, (u32)(val), #_reg);			\
	} while (0)
#define phy_writel_relaxed(phy, val, _reg)				\
	do {								\
		writel_relaxed((val), (phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel_relaxed(0x%08x, %s)\n",		\
			 __func__, (u32)(val), #_reg);			\
	} while (0)

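/*
 * DRCMR (request to channel map) registers: requestor lines 0-63 live
 * at 0x100 + line * 4, higher lines at 0x1000 + line * 4. Writing
 * DRCMR_MAPVLD | channel index routes a requestor line to a physical
 * channel (see phy_enable() below).
 */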
static unsigned int pxad_drcmr(unsigned int line)
{
	if (line < 64)
		return 0x100 + line * 4;
	return 0x1000 + line * 4;
}

static bool pxad_filter_fn(struct dma_chan *chan, void *param);

/*
 * Debug fs
 */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

static int requester_chan_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i;
	u32 drcmr;

	seq_printf(s, "DMA channel %d requester :\n", phy->idx);
	for (i = 0; i < 70; i++) {
		drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
		if ((drcmr & DRCMR_CHLNUM) == phy->idx)
			seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
				   !!(drcmr & DRCMR_MAPVLD));
	}
	return 0;
}

static inline int dbg_burst_from_dcmd(u32 dcmd)
{
	int burst = (dcmd >> 16) & 0x3;

	return burst ? 4 << burst : 0;
}

static int is_phys_valid(unsigned long addr)
{
	return pfn_valid(__phys_to_pfn(addr));
}

#define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "")
#define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "")

static int descriptors_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i, max_show = 20, burst, width;
	u32 dcmd;
	unsigned long phys_desc, ddadr;
	struct pxad_desc_hw *desc;

	phys_desc = ddadr = _phy_readl_relaxed(phy, DDADR);

	seq_printf(s, "DMA channel %d descriptors :\n", phy->idx);
	seq_printf(s, "[%03d] First descriptor unknown\n", 0);
	for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
		desc = phys_to_virt(phys_desc);
		dcmd = desc->dcmd;
		burst = dbg_burst_from_dcmd(dcmd);
		width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

		seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
			   i, phys_desc, desc);
		seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
		seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
		seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
		seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
			   dcmd,
			   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
			   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
			   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
			   PXA_DCMD_STR(ENDIAN), burst, width,
			   dcmd & PXA_DCMD_LENGTH);
		phys_desc = desc->ddadr;
	}
	if (i == max_show)
		seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
			   i, phys_desc);
	else
		seq_printf(s, "[%03d] Desc at %08lx is %s\n",
			   i, phys_desc, phys_desc == DDADR_STOP ?
			   "DDADR_STOP" : "invalid");

	return 0;
}

static int chan_state_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	u32 dcsr, dcmd;
	int burst, width;
	static const char * const str_prio[] = {
		"high", "normal", "low", "invalid"
	};

	dcsr = _phy_readl_relaxed(phy, DCSR);
	dcmd = _phy_readl_relaxed(phy, DCMD);
	burst = dbg_burst_from_dcmd(dcmd);
	width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

	seq_printf(s, "DMA channel %d\n", phy->idx);
	seq_printf(s, "\tPriority : %s\n",
			  str_prio[(phy->idx & 0xf) / 4]);
	seq_printf(s, "\tUnaligned transfer bit: %s\n",
			  _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ?
			  "yes" : "no");
	seq_printf(s, "\tDCSR  = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
		   dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
		   PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
		   PXA_DCSR_STR(EORJMPEN), PXA_DCSR_STR(EORSTOPEN),
		   PXA_DCSR_STR(SETCMPST), PXA_DCSR_STR(CLRCMPST),
		   PXA_DCSR_STR(CMPST), PXA_DCSR_STR(EORINTR),
		   PXA_DCSR_STR(REQPEND), PXA_DCSR_STR(STOPSTATE),
		   PXA_DCSR_STR(ENDINTR), PXA_DCSR_STR(STARTINTR),
		   PXA_DCSR_STR(BUSERR));

	seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
		   dcmd,
		   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
		   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
		   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
		   PXA_DCMD_STR(ENDIAN), burst, width, dcmd & PXA_DCMD_LENGTH);
	seq_printf(s, "\tDSADR = %08x\n", _phy_readl_relaxed(phy, DSADR));
	seq_printf(s, "\tDTADR = %08x\n", _phy_readl_relaxed(phy, DTADR));
	seq_printf(s, "\tDDADR = %08x\n", _phy_readl_relaxed(phy, DDADR));

	return 0;
}

static int state_show(struct seq_file *s, void *p)
{
	struct pxad_device *pdev = s->private;

	/* basic device status */
	seq_puts(s, "DMA engine status\n");
	seq_printf(s, "\tChannel number: %d\n", pdev->nr_chans);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(chan_state);
DEFINE_SHOW_ATTRIBUTE(descriptors);
DEFINE_SHOW_ATTRIBUTE(requester_chan);

static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
					     int ch, struct dentry *chandir)
{
	char chan_name[11];
	struct dentry *chan;
	void *dt;

	scnprintf(chan_name, sizeof(chan_name), "%d", ch);
	chan = debugfs_create_dir(chan_name, chandir);
	dt = (void *)&pdev->phys[ch];

	debugfs_create_file("state", 0400, chan, dt, &chan_state_fops);
	debugfs_create_file("descriptors", 0400, chan, dt, &descriptors_fops);
	debugfs_create_file("requesters", 0400, chan, dt, &requester_chan_fops);

	return chan;
}

static void pxad_init_debugfs(struct pxad_device *pdev)
{
	int i;
	struct dentry *chandir;

	pdev->dbgfs_chan =
		kmalloc_array(pdev->nr_chans, sizeof(struct dentry *),
			      GFP_KERNEL);
	if (!pdev->dbgfs_chan)
		return;

	pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);

	debugfs_create_file("state", 0400, pdev->dbgfs_root, pdev, &state_fops);

	chandir = debugfs_create_dir("channels", pdev->dbgfs_root);

	for (i = 0; i < pdev->nr_chans; i++)
		pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
}

static void pxad_cleanup_debugfs(struct pxad_device *pdev)
{
	debugfs_remove_recursive(pdev->dbgfs_root);
}
#else
static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
#endif

static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
	int prio, i;
	struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device);
	struct pxad_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) {
		for (i = 0; i < pdev->nr_chans; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phys[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	dev_dbg(&pchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, found,
		found ? found->idx : -1);

	return found;
}

static void pxad_free_phy(struct pxad_chan *chan)
{
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	unsigned long flags;
	u32 reg;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): freeing\n", __func__);
	if (!chan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	if (chan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(chan->drcmr);
		writel_relaxed(0, chan->phy->base + reg);
	}

	spin_lock_irqsave(&pdev->phy_lock, flags);
	chan->phy->vchan = NULL;
	chan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

static bool is_chan_running(struct pxad_chan *chan)
{
	u32 dcsr;
	struct pxad_phy *phy = chan->phy;

	if (!phy)
		return false;
	dcsr = phy_readl_relaxed(phy, DCSR);
	return dcsr & PXA_DCSR_RUN;
}

static bool is_running_chan_misaligned(struct pxad_chan *chan)
{
	u32 dalgn;

	BUG_ON(!chan->phy);
	dalgn = phy_readl_relaxed(chan->phy, DALGN);
	return dalgn & (BIT(chan->phy->idx));
}

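/*
 * phy_enable() binds the requestor line to this physical channel via
 * DRCMR, sets or clears the channel's DALGN bit according to the
 * transfer alignment, then writes DCSR to start the run with
 * stop/end/bus-error interrupts enabled.
 */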
static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
	struct pxad_device *pdev;
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
		phy, phy->idx, misaligned);

	pdev = to_pxad_dev(phy->vchan->vc.chan.device);
	if (phy->vchan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(phy->vchan->drcmr);
		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
	}

	dalgn = phy_readl_relaxed(phy, DALGN);
	if (misaligned)
		dalgn |= BIT(phy->idx);
	else
		dalgn &= ~BIT(phy->idx);
	phy_writel_relaxed(phy, dalgn, DALGN);

	phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR |
		   PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR);
}

static void phy_disable(struct pxad_phy *phy)
{
	u32 dcsr;

	if (!phy)
		return;

	dcsr = phy_readl_relaxed(phy, DCSR);
	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, phy, phy->idx);
	phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR);
}

static void pxad_launch_chan(struct pxad_chan *chan,
				 struct pxad_desc_sw *desc)
{
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): desc=%p\n", __func__, desc);
	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(&chan->vc.chan.dev->device,
				"%s(): no free dma channel\n", __func__);
			return;
		}
	}
	chan->bus_error = 0;

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	phy_writel(chan->phy, desc->first, DDADR);
	phy_enable(chan->phy, chan->misaligned);
	wake_up(&chan->wq_state);
}

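/*
 * Transfer completion is detected with a trailing "updater" hardware
 * descriptor. The updater copies one u32 from its own base address
 * (its ddadr field, holding DDADR_STOP) over its dtadr field at base
 * + 8. Once it has run, the in-memory dtadr no longer equals dsadr + 8,
 * which is exactly what is_desc_completed() tests. For cyclic
 * transfers, the second-to-last descriptor is looped back onto the
 * first instead, so the chain never reaches the updater's DDADR_STOP.
 */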
static void set_updater_desc(struct pxad_desc_sw *sw_desc,
			     unsigned long flags)
{
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];
	dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr;

	updater->ddadr = DDADR_STOP;
	updater->dsadr = dma;
	updater->dtadr = dma + 8;
	updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
		(PXA_DCMD_LENGTH & sizeof(u32));
	if (flags & DMA_PREP_INTERRUPT)
		updater->dcmd |= PXA_DCMD_ENDIRQEN;
	if (sw_desc->cyclic)
		sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
}

static bool is_desc_completed(struct virt_dma_desc *vd)
{
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];

	return updater->dtadr != (updater->dsadr + 8);
}

static void pxad_desc_chain(struct virt_dma_desc *vd1,
				struct virt_dma_desc *vd2)
{
	struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1);
	struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2);
	dma_addr_t dma_to_chain;

	dma_to_chain = desc2->first;
	desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain;
}

static bool pxad_try_hotchain(struct virt_dma_chan *vc,
				  struct virt_dma_desc *vd)
{
	struct virt_dma_desc *vd_last_issued = NULL;
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);

	/*
	 * Attempt to hot chain the tx if the phy is still running. This is
	 * considered successful only if either the channel is still running
	 * after the chaining, or if the chained transfer is completed after
	 * having been hot chained. A change of alignment is not allowed and
	 * forbids hotchaining.
	 */
	if (is_chan_running(chan)) {
		BUG_ON(list_empty(&vc->desc_issued));

		if (!is_running_chan_misaligned(chan) &&
		    to_pxad_sw_desc(vd)->misaligned)
			return false;

		vd_last_issued = list_entry(vc->desc_issued.prev,
					    struct virt_dma_desc, node);
		pxad_desc_chain(vd_last_issued, vd);
		if (is_chan_running(chan) || is_desc_completed(vd))
			return true;
	}

	return false;
}

static unsigned int clear_chan_irq(struct pxad_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);

	if (!(dint & BIT(phy->idx)))
		return PXA_DCSR_RUN;

	/* clear irq */
	dcsr = phy_readl_relaxed(phy, DCSR);
	phy_writel(phy, dcsr, DCSR);
	if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan))
		dev_warn(&phy->vchan->vc.chan.dev->device,
			 "%s(chan=%p): PXA_DCSR_BUSERR\n",
			 __func__, phy->vchan);

	return dcsr & ~PXA_DCSR_RUN;
}

static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
{
	struct pxad_phy *phy = dev_id;
	struct pxad_chan *chan = phy->vchan;
	struct virt_dma_desc *vd, *tmp;
	unsigned int dcsr;
	bool vd_completed;
	dma_cookie_t last_started = 0;

	BUG_ON(!chan);

	dcsr = clear_chan_irq(phy);
	if (dcsr & PXA_DCSR_RUN)
		return IRQ_NONE;

	spin_lock(&chan->vc.lock);
	list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
		vd_completed = is_desc_completed(vd);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
			__func__, vd, vd->tx.cookie, vd_completed,
			dcsr);
		last_started = vd->tx.cookie;
		if (to_pxad_sw_desc(vd)->cyclic) {
			vchan_cyclic_callback(vd);
			break;
		}
		if (vd_completed) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		} else {
			break;
		}
	}

	if (dcsr & PXA_DCSR_BUSERR) {
		chan->bus_error = last_started;
		phy_disable(phy);
	}

	if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) {
		dev_dbg(&chan->vc.chan.dev->device,
		"%s(): channel stopped, submitted_empty=%d issued_empty=%d",
			__func__,
			list_empty(&chan->vc.desc_submitted),
			list_empty(&chan->vc.desc_issued));
		phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR);

		if (list_empty(&chan->vc.desc_issued)) {
			chan->misaligned =
				!list_empty(&chan->vc.desc_submitted);
		} else {
			vd = list_first_entry(&chan->vc.desc_issued,
					      struct virt_dma_desc, node);
			pxad_launch_chan(chan, to_pxad_sw_desc(vd));
		}
	}
	spin_unlock(&chan->vc.lock);
	wake_up(&chan->wq_state);

	return IRQ_HANDLED;
}

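/*
 * DINT exposes one pending-interrupt bit per physical channel. On
 * platforms with a single muxed IRQ, this handler walks the set bits
 * and dispatches each one to pxad_chan_handler().
 */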
static irqreturn_t pxad_int_handler(int irq, void *dev_id)
{
	struct pxad_device *pdev = dev_id;
	struct pxad_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret = IRQ_NONE;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phys[i];
		if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}

static int pxad_alloc_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
					  pdev->slave.dev,
					  sizeof(struct pxad_desc_hw),
					  __alignof__(struct pxad_desc_hw),
					  0);
	if (!chan->desc_pool) {
		dev_err(&chan->vc.chan.dev->device,
			"%s(): unable to allocate descriptor pool\n",
			__func__);
		return -ENOMEM;
	}

	return 1;
}

static void pxad_free_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	vchan_free_chan_resources(&chan->vc);
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;

	chan->drcmr = U32_MAX;
	chan->prio = PXAD_PRIO_LOWEST;
}

static void pxad_free_desc(struct virt_dma_desc *vd)
{
	int i;
	dma_addr_t dma;
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);

	for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
		if (i > 0)
			dma = sw_desc->hw_desc[i - 1]->ddadr;
		else
			dma = sw_desc->first;
		dma_pool_free(sw_desc->desc_pool,
			      sw_desc->hw_desc[i], dma);
	}
	sw_desc->nb_desc = 0;
	kfree(sw_desc);
}

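/*
 * Allocates one software descriptor plus nb_hw_desc hardware
 * descriptors from the channel's dma_pool, chaining each hw descriptor
 * to the next through its ddadr field as it goes. The chain is undone
 * by pxad_free_desc() on failure or release.
 */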
static struct pxad_desc_sw *
pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
{
	struct pxad_desc_sw *sw_desc;
	dma_addr_t dma;
	int i;

	sw_desc = kzalloc(struct_size(sw_desc, hw_desc, nb_hw_desc),
			  GFP_NOWAIT);
	if (!sw_desc)
		return NULL;
	sw_desc->desc_pool = chan->desc_pool;

	for (i = 0; i < nb_hw_desc; i++) {
		sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool,
						     GFP_NOWAIT, &dma);
		if (!sw_desc->hw_desc[i]) {
			dev_err(&chan->vc.chan.dev->device,
				"%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
				__func__, i, sw_desc->desc_pool);
			goto err;
		}

		if (i == 0)
			sw_desc->first = dma;
		else
			sw_desc->hw_desc[i - 1]->ddadr = dma;
		sw_desc->nb_desc++;
	}

	return sw_desc;
err:
	pxad_free_desc(&sw_desc->vd);
	return NULL;
}

static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);
	struct virt_dma_desc *vd_chained = NULL,
		*vd = container_of(tx, struct virt_dma_desc, tx);
	dma_cookie_t cookie;
	unsigned long flags;

	set_updater_desc(to_pxad_sw_desc(vd), tx->flags);

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) {
		list_move_tail(&vd->node, &vc->desc_issued);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): txd %p[%x]: submitted (hot linked)\n",
			__func__, vd, cookie);
		goto out;
	}

	/*
	 * Fallback to placing the tx in the submitted queue
	 */
	if (!list_empty(&vc->desc_submitted)) {
		vd_chained = list_entry(vc->desc_submitted.prev,
					struct virt_dma_desc, node);
		/*
		 * Only chain the descriptors if no new misalignment is
		 * introduced. If a new misalignment is chained, let the channel
		 * stop, and be relaunched in misalign mode from the irq
		 * handler.
		 */
		if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned)
			pxad_desc_chain(vd_chained, vd);
		else
			vd_chained = NULL;
	}
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]: submitted (%s linked)\n",
		__func__, vd, cookie, vd_chained ? "cold" : "not");
	list_move_tail(&vd->node, &vc->desc_submitted);
	chan->misaligned |= to_pxad_sw_desc(vd)->misaligned;

out:
	spin_unlock_irqrestore(&vc->lock, flags);
	return cookie;
}

static void pxad_issue_pending(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct virt_dma_desc *vd_first;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (list_empty(&chan->vc.desc_submitted))
		goto out;

	vd_first = list_first_entry(&chan->vc.desc_submitted,
				    struct virt_dma_desc, node);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]", __func__, vd_first, vd_first->tx.cookie);

	vchan_issue_pending(&chan->vc);
	if (!pxad_try_hotchain(&chan->vc, vd_first))
		pxad_launch_chan(chan, to_pxad_sw_desc(vd_first));
out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static inline struct dma_async_tx_descriptor *
pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
		 unsigned long tx_flags)
{
	struct dma_async_tx_descriptor *tx;
	struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);

	INIT_LIST_HEAD(&vd->node);
	tx = vchan_tx_prep(vc, vd, tx_flags);
	tx->tx_submit = pxad_tx_submit;
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__,
		vc, vd, vd->tx.cookie,
		tx_flags);

	return tx;
}

static void pxad_get_config(struct pxad_chan *chan,
			    enum dma_transfer_direction dir,
			    u32 *dcmd, u32 *dev_src, u32 *dev_dst)
{
	u32 maxburst = 0, dev_addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	*dcmd = 0;
	if (dir == DMA_DEV_TO_MEM) {
		maxburst = chan->cfg.src_maxburst;
		width = chan->cfg.src_addr_width;
		dev_addr = chan->cfg.src_addr;
		*dev_src = dev_addr;
		*dcmd |= PXA_DCMD_INCTRGADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWSRC;
	}
	if (dir == DMA_MEM_TO_DEV) {
		maxburst = chan->cfg.dst_maxburst;
		width = chan->cfg.dst_addr_width;
		dev_addr = chan->cfg.dst_addr;
		*dev_dst = dev_addr;
		*dcmd |= PXA_DCMD_INCSRCADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWTRG;
	}
	if (dir == DMA_MEM_TO_MEM)
		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
			PXA_DCMD_INCSRCADDR;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dev_addr=0x%x maxburst=%d width=%d  dir=%d\n",
		__func__, dev_addr, maxburst, width, dir);

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		*dcmd |= PXA_DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		*dcmd |= PXA_DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		*dcmd |= PXA_DCMD_WIDTH4;

	if (maxburst == 8)
		*dcmd |= PXA_DCMD_BURST8;
	else if (maxburst == 16)
		*dcmd |= PXA_DCMD_BURST16;
	else if (maxburst == 32)
		*dcmd |= PXA_DCMD_BURST32;
}

static struct dma_async_tx_descriptor *
pxad_prep_memcpy(struct dma_chan *dchan,
		 dma_addr_t dma_dst, dma_addr_t dma_src,
		 size_t len, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw *hw_desc;
	u32 dcmd;
	unsigned int i, nb_desc = 0;
	size_t copy;

	if (!dchan || !len)
		return NULL;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n",
		__func__, (unsigned long)dma_dst, (unsigned long)dma_src,
		len, flags);
	pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL);

	nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->len = len;

	if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) ||
	    !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT))
		sw_desc->misaligned = true;

	i = 0;
	do {
		hw_desc = sw_desc->hw_desc[i++];
		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy);
		hw_desc->dsadr = dma_src;
		hw_desc->dtadr = dma_dst;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		   unsigned int sg_len, enum dma_transfer_direction dir,
		   unsigned long flags, void *context)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0, i, j = 0;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dir=%d flags=%lx\n", __func__, dir, flags);

	for_each_sg(sgl, sg, sg_len, i)
		nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		dma = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		sw_desc->len += avail;

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (dma & 0x7)
				sw_desc->misaligned = true;

			sw_desc->hw_desc[j]->dcmd =
				dcmd | (PXA_DCMD_LENGTH & len);
			sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma;
			sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma;

			dma += len;
			avail -= len;
		} while (avail);
	}
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

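/*
 * Cyclic transfers require a period that is 8-byte aligned, no larger
 * than PDMA_MAX_DESC_BYTES, and that evenly divides the buffer length.
 * A hypothetical audio-style client could then set one up as follows
 * (a sketch only; names and sizes are illustrative):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_dma, 4 * PAGE_SIZE,
 *				       PAGE_SIZE, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = period_done;	// fires once per period
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */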
static struct dma_async_tx_descriptor *
pxad_prep_dma_cyclic(struct dma_chan *dchan,
		     dma_addr_t buf_addr, size_t len, size_t period_len,
		     enum dma_transfer_direction dir, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw **phw_desc;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0;

	if (!dchan || !len || !period_len)
		return NULL;
	if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) {
		dev_err(&chan->vc.chan.dev->device,
			"Unsupported direction for cyclic DMA\n");
		return NULL;
	}
	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES ||
	    !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
		__func__, (unsigned long)buf_addr, len, period_len, dir, flags);

	nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES);
	nb_desc *= DIV_ROUND_UP(len, period_len);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->cyclic = true;
	sw_desc->len = len;

	phw_desc = sw_desc->hw_desc;
	dma = buf_addr;
	do {
		phw_desc[0]->dsadr = dsadr ? dsadr : dma;
		phw_desc[0]->dtadr = dtadr ? dtadr : dma;
		phw_desc[0]->dcmd = dcmd;
		phw_desc++;
		dma += period_len;
		len -= period_len;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

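/*
 * The channel configuration is simply cached here; it is applied when a
 * slave transfer is prepared (see pxad_get_config() above). A
 * hypothetical client feeding from a peripheral RX FIFO might configure
 * (sketch only; the address and widths are illustrative):
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 32,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */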
static int pxad_config(struct dma_chan *dchan,
		       struct dma_slave_config *cfg)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	if (!dchan)
		return -EINVAL;

	chan->cfg = *cfg;
	return 0;
}

static int pxad_terminate_all(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	struct virt_dma_desc *vd = NULL;
	unsigned long flags;
	struct pxad_phy *phy;
	LIST_HEAD(head);

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vchan %p: terminate all\n", __func__, &chan->vc);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);

	list_for_each_entry(vd, &head, node) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): cancelling txd %p[%x] (completed=%d)", __func__,
			vd, vd->tx.cookie, is_desc_completed(vd));
	}

	phy = chan->phy;
	if (phy) {
		phy_disable(chan->phy);
		pxad_free_phy(chan);
		chan->phy = NULL;
		spin_lock(&pdev->phy_lock);
		phy->vchan = NULL;
		spin_unlock(&pdev->phy_lock);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	vchan_dma_desc_free_list(&chan->vc, &head);

	return 0;
}

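/*
 * The residue is computed by locating which hardware descriptor the
 * channel's current source (or target) address pointer falls into,
 * then summing that descriptor's remaining bytes with the lengths of
 * every descriptor that follows it in the chain.
 */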
static unsigned int pxad_residue(struct pxad_chan *chan,
				 dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pxad_desc_sw *sw_desc = NULL;
	struct pxad_desc_hw *hw_desc = NULL;
	u32 curr, start, len, end, residue = 0;
	unsigned long flags;
	bool passed = false;
	int i;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vd = vchan_find_desc(&chan->vc, cookie);
	if (!vd)
		goto out;

	sw_desc = to_pxad_sw_desc(vd);
	if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
		curr = phy_readl_relaxed(chan->phy, DSADR);
	else
		curr = phy_readl_relaxed(chan->phy, DTADR);

	/*
	 * curr has to be actually read before checking descriptor
	 * completion, so that a curr inside a status updater
	 * descriptor implies the following test returns true; the
	 * barrier prevents reordering of the curr load and the test.
	 */
	rmb();
	if (is_desc_completed(vd))
		goto out;

	for (i = 0; i < sw_desc->nb_desc - 1; i++) {
		hw_desc = sw_desc->hw_desc[i];
		if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
			start = hw_desc->dsadr;
		else
			start = hw_desc->dtadr;
		len = hw_desc->dcmd & PXA_DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' is latched once we find the descriptor which
		 * lies inside the boundaries of the curr pointer. All
		 * descriptors that occur in the list _after_ that
		 * partially handled descriptor are still to be processed
		 * and are hence added to the residual bytes counter.
		 */

		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}
	}
	if (!passed)
		residue = sw_desc->len;

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x] sw_desc=%p: %d\n",
		__func__, vd, cookie, sw_desc, residue);
	return residue;
}

static enum dma_status pxad_tx_status(struct dma_chan *dchan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	enum dma_status ret;

	if (cookie == chan->bus_error)
		return DMA_ERROR;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(txstate && (ret != DMA_ERROR)))
		dma_set_residue(txstate, pxad_residue(chan, cookie));

	return ret;
}

static void pxad_synchronize(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	wait_event(chan->wq_state, !is_chan_running(chan));
	vchan_synchronize(&chan->vc);
}

static void pxad_free_channels(struct dma_device *dmadev)
{
	struct pxad_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static int pxad_remove(struct platform_device *op)
{
	struct pxad_device *pdev = platform_get_drvdata(op);

	pxad_cleanup_debugfs(pdev);
	pxad_free_channels(&pdev->slave);
	return 0;
}

static int pxad_init_phys(struct platform_device *op,
			  struct pxad_device *pdev,
			  unsigned int nb_phy_chans)
{
	int irq0, irq, nr_irq = 0, i, ret = 0;
	struct pxad_phy *phy;

	irq0 = platform_get_irq(op, 0);
	if (irq0 < 0)
		return irq0;

	pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
				  sizeof(pdev->phys[0]), GFP_KERNEL);
	if (!pdev->phys)
		return -ENOMEM;

	for (i = 0; i < nb_phy_chans; i++)
		if (platform_get_irq_optional(op, i) > 0)
			nr_irq++;

	for (i = 0; i < nb_phy_chans; i++) {
		phy = &pdev->phys[i];
		phy->base = pdev->base;
		phy->idx = i;
		irq = platform_get_irq_optional(op, i);
		if ((nr_irq > 1) && (irq > 0))
			ret = devm_request_irq(&op->dev, irq,
					       pxad_chan_handler,
					       IRQF_SHARED, "pxa-dma", phy);
		if ((nr_irq == 1) && (i == 0))
			ret = devm_request_irq(&op->dev, irq0,
					       pxad_int_handler,
					       IRQF_SHARED, "pxa-dma", pdev);
		if (ret) {
			dev_err(pdev->slave.dev,
				"%s(): can't request irq %d:%d\n", __func__,
				irq, ret);
			return ret;
		}
	}

	return 0;
}

static const struct of_device_id pxad_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, pxad_dt_ids);

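/*
 * Device-tree translation: the xlate callback consumes two cells per
 * DMA specifier, args[0] being the requestor line (drcmr) and args[1]
 * the channel priority. A hypothetical consumer node could therefore
 * contain (illustrative only):
 *
 *	dmas = <&pdma 24 1>;
 *	dma-names = "rx";
 */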
static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct pxad_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->slave);
	if (!chan)
		return NULL;

	to_pxad_chan(chan)->drcmr = dma_spec->args[0];
	to_pxad_chan(chan)->prio = dma_spec->args[1];

	return chan;
}

static int pxad_init_dmadev(struct platform_device *op,
			    struct pxad_device *pdev,
			    unsigned int nr_phy_chans,
			    unsigned int nr_requestors)
{
	int ret;
	unsigned int i;
	struct pxad_chan *c;

	pdev->nr_chans = nr_phy_chans;
	pdev->nr_requestors = nr_requestors;
	INIT_LIST_HEAD(&pdev->slave.channels);
	pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
	pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
	pdev->slave.device_tx_status = pxad_tx_status;
	pdev->slave.device_issue_pending = pxad_issue_pending;
	pdev->slave.device_config = pxad_config;
	pdev->slave.device_synchronize = pxad_synchronize;
	pdev->slave.device_terminate_all = pxad_terminate_all;

	if (op->dev.coherent_dma_mask)
		dma_set_mask(&op->dev, op->dev.coherent_dma_mask);
	else
		dma_set_mask(&op->dev, DMA_BIT_MASK(32));

	ret = pxad_init_phys(op, pdev, nr_phy_chans);
	if (ret)
		return ret;

	for (i = 0; i < nr_phy_chans; i++) {
		c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
		if (!c)
			return -ENOMEM;

		c->drcmr = U32_MAX;
		c->prio = PXAD_PRIO_LOWEST;
		c->vc.desc_free = pxad_free_desc;
		vchan_init(&c->vc, &pdev->slave);
		init_waitqueue_head(&c->wq_state);
	}

	return dmaenginem_async_device_register(&pdev->slave);
}

static int pxad_probe(struct platform_device *op)
{
	struct pxad_device *pdev;
	const struct of_device_id *of_id;
	const struct dma_slave_map *slave_map = NULL;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	spin_lock_init(&pdev->phy_lock);

	pdev->base = devm_platform_ioremap_resource(op, 0);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(pxad_dt_ids, &op->dev);
	if (of_id) {
		/* Parse new and deprecated dma-channels properties */
		if (of_property_read_u32(op->dev.of_node, "dma-channels",
					 &dma_channels))
			of_property_read_u32(op->dev.of_node, "#dma-channels",
					     &dma_channels);
		/* Parse new and deprecated dma-requests properties */
		ret = of_property_read_u32(op->dev.of_node, "dma-requests",
					   &nb_requestors);
		if (ret)
			ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
						   &nb_requestors);
		if (ret) {
			dev_warn(pdev->slave.dev,
				 "#dma-requests set to default 32 as missing in OF: %d",
				 ret);
			nb_requestors = 32;
		}
	} else if (pdata && pdata->dma_channels) {
		dma_channels = pdata->dma_channels;
		nb_requestors = pdata->nb_requestors;
		slave_map = pdata->slave_map;
		slave_map_cnt = pdata->slave_map_cnt;
	} else {
		dma_channels = 32;	/* default to 32 channels */
	}

	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
	pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
	pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
	pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;
	pdev->slave.filter.map = slave_map;
	pdev->slave.filter.mapcnt = slave_map_cnt;
	pdev->slave.filter.fn = pxad_filter_fn;

	pdev->slave.copy_align = PDMA_ALIGNMENT;
	pdev->slave.src_addr_widths = widths;
	pdev->slave.dst_addr_widths = widths;
	pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	pdev->slave.descriptor_reuse = true;

	pdev->slave.dev = &op->dev;
	ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
	if (ret) {
		dev_err(pdev->slave.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 pxad_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(pdev->slave.dev,
				"of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	pxad_init_debugfs(pdev);
	dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
		 dma_channels, nb_requestors);
	return 0;
}

static const struct platform_device_id pxad_id_table[] = {
	{ "pxa-dma", },
	{ },
};

static struct platform_driver pxad_driver = {
	.driver		= {
		.name	= "pxa-dma",
		.of_match_table = pxad_dt_ids,
	},
	.id_table	= pxad_id_table,
	.probe		= pxad_probe,
	.remove		= pxad_remove,
};

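/*
 * Channel filter, wired into pdev->slave.filter.fn above so that
 * dma_request_chan() can match board-provided dma_slave_map entries.
 * Since this function casts param to a struct pxad_param, each map
 * entry's param must point at one; a hypothetical entry could look
 * like (illustrative only):
 *
 *	static const struct pxad_param ssp_rx_param = {
 *		.drcmr = 13,
 *		.prio = PXAD_PRIO_LOWEST,
 *	};
 *	static const struct dma_slave_map board_slave_map[] = {
 *		{ "pxa2xx-ssp.0", "rx", (void *)&ssp_rx_param },
 *	};
 */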
static bool pxad_filter_fn(struct dma_chan *chan, void *param)
{
	struct pxad_chan *c = to_pxad_chan(chan);
	struct pxad_param *p = param;

	if (chan->device->dev->driver != &pxad_driver.driver)
		return false;

	c->drcmr = p->drcmr;
	c->prio = p->prio;

	return true;
}

module_platform_driver(pxad_driver);

MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_LICENSE("GPL v2");