// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2019 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"
/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces are selected
 * at configuration time. An interface can either be connected to a central
 * memory interconnect, which allows access to system memory, or it can be
 * connected to a dedicated bus which is directly connected to a data port on
 * a peripheral. Since these are configuration options of the core that are
 * selected when it is instantiated, they cannot be changed by software at
 * runtime. By extension this means that each channel is uni-directional. It
 * can either be device to memory or memory to device, but not both. Also,
 * since the device side is a dedicated data bus connected to a single
 * peripheral, there is no address that can or needs to be configured for the
 * device side.
 */
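
/*
 * A minimal sketch of typical dmaengine client usage against a channel of
 * this controller (generic dmaengine API shown for orientation only; the
 * channel name, buffer and callback are illustrative assumptions):
 *
 *	chan = dma_request_chan(dev, "rx");
 *	desc = dmaengine_prep_slave_single(chan, buf_phys, buf_len,
 *					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = done_fn;
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */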

#define AXI_DMAC_REG_INTERFACE_DESC	0x10
#define   AXI_DMAC_DMA_SRC_TYPE_MSK	GENMASK(13, 12)
#define   AXI_DMAC_DMA_SRC_TYPE_GET(x)	FIELD_GET(AXI_DMAC_DMA_SRC_TYPE_MSK, x)
#define   AXI_DMAC_DMA_SRC_WIDTH_MSK	GENMASK(11, 8)
#define   AXI_DMAC_DMA_SRC_WIDTH_GET(x)	FIELD_GET(AXI_DMAC_DMA_SRC_WIDTH_MSK, x)
#define   AXI_DMAC_DMA_DST_TYPE_MSK	GENMASK(5, 4)
#define   AXI_DMAC_DMA_DST_TYPE_GET(x)	FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
#define   AXI_DMAC_DMA_DST_WIDTH_MSK	GENMASK(3, 0)
#define   AXI_DMAC_DMA_DST_WIDTH_GET(x)	FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)
#define AXI_DMAC_REG_COHERENCY_DESC	0x14
#define   AXI_DMAC_DST_COHERENT_MSK	BIT(0)
#define   AXI_DMAC_DST_COHERENT_GET(x)	FIELD_GET(AXI_DMAC_DST_COHERENT_MSK, x)

#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN	0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID	0x450

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
#define AXI_DMAC_FLAG_LAST		BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT	BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U

struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
	unsigned int partial_len;
	bool schedule_when_free;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	bool have_partial_xfer;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int address_align_mask;
	unsigned int length_align_mask;

	bool hw_partial_xfer;
	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->length_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}
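
/*
 * Example: with an 8-byte (64-bit) widest port, address_align_mask is 7, so
 * only 8-byte aligned addresses pass axi_dmac_check_addr(). The length
 * alignment mask is probed in axi_dmac_detect_caps(), falling back to the
 * address mask on older cores.
 */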
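/*
 * Submit the next scatter-gather segment to the hardware. Bail out if the
 * hardware descriptor queue is full (START_TRANSFER reads back non-zero);
 * otherwise program the address, stride and length registers for the segment
 * and start it by writing START_TRANSFER. Must be called with the channel's
 * vchan lock held.
 */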
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs ||
	    desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call and only a single segment, enable hw cyclic mode to avoid
	 * unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
		desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	/* The length registers hold the transfer length minus one */
	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
	struct axi_dmac_sg *sg)
{
	if (chan->hw_2d)
		return sg->x_len * sg->y_len;
	else
		return sg->x_len;
}

static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id  = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg) {
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		} else {
			dev_warn(dmac->dma_dev.dev,
				 "Did not find partial segment id=%u, len=%u\n",
				 id, len);
		}

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}

static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}

static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
			break;
		if (!(BIT(sg->id) & completed_transfers))
			break;
		active->num_completed++;
		sg->id = AXI_DMAC_SG_UNUSED;
		if (sg->schedule_when_free) {
			sg->schedule_when_free = false;
			start_next = true;
		}

		if (sg->partial_len)
			axi_dmac_compute_residue(chan, active);

		if (active->cyclic)
			vchan_cyclic_callback(&active->vdesc);

		if (active->num_completed == active->num_sgs ||
		    sg->partial_len) {
			if (active->cyclic) {
				active->num_completed = 0; /* wrap around */
			} else {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		}
	} while (active);

	return start_next;
}

static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_sgs; i++)
		desc->sg[i].id = AXI_DMAC_SG_UNUSED;

	desc->num_sgs = num_sgs;

	return desc;
}

static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Take care of alignment */
	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;

	for (i = 0; i < num_periods; i++) {
		len = period_len;

		while (len > segment_size) {
			if (direction == DMA_DEV_TO_MEM)
				sg->dest_addr = addr;
			else
				sg->src_addr = addr;
			sg->x_len = segment_size;
			sg->y_len = 1;
			sg++;
			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->dest_addr = addr;
		else
			sg->src_addr = addr;
		sg->x_len = len;
		sg->y_len = 1;
		sg++;
		addr += len;
	}

	return sg;
}
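
/*
 * Worked example of the splitting above: for period_len = 10000,
 * chan->max_length = 4096 and length_align_mask = 3, num_segments is 3 and
 * the raw segment_size of 3334 is rounded up to 3336 via
 * ((3334 - 1) | 3) + 1. Each period is then emitted as three segments of
 * 3336, 3336 and 3328 bytes (10000 bytes total).
 */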

static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
			sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);

	desc = axi_dmac_alloc_desc(num_periods * num_segments);
	if (!desc)
		return NULL;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
		period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	if (flags & DMA_CYCLIC)
		desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case AXI_DMAC_REG_IRQ_MASK:
	case AXI_DMAC_REG_IRQ_SOURCE:
	case AXI_DMAC_REG_IRQ_PENDING:
	case AXI_DMAC_REG_CTRL:
	case AXI_DMAC_REG_TRANSFER_ID:
	case AXI_DMAC_REG_START_TRANSFER:
	case AXI_DMAC_REG_FLAGS:
	case AXI_DMAC_REG_DEST_ADDRESS:
	case AXI_DMAC_REG_SRC_ADDRESS:
	case AXI_DMAC_REG_X_LENGTH:
	case AXI_DMAC_REG_Y_LENGTH:
	case AXI_DMAC_REG_DEST_STRIDE:
	case AXI_DMAC_REG_SRC_STRIDE:
	case AXI_DMAC_REG_TRANSFER_DONE:
	case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
	case AXI_DMAC_REG_STATUS:
	case AXI_DMAC_REG_CURRENT_SRC_ADDR:
	case AXI_DMAC_REG_CURRENT_DEST_ADDR:
	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
	case AXI_DMAC_REG_PARTIAL_XFER_ID:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config axi_dmac_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
	.readable_reg = axi_dmac_regmap_rdwr,
	.writeable_reg = axi_dmac_regmap_rdwr,
};

static void axi_dmac_adjust_chan_params(struct axi_dmac_chan *chan)
{
	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}

static int axi_dmac_parse_dt(struct device *dev, struct axi_dmac *dmac)
{
	struct device_node *of_channels, *of_chan;
	int ret;

	of_channels = of_get_child_by_name(dev->of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	return 0;
}
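
/*
 * An illustrative devicetree fragment for the layout parsed above (the
 * numeric values and the child node name are made-up examples, not taken
 * from a real design):
 *
 *	dma-controller@44a30000 {
 *		compatible = "adi,axi-dmac-1.00.a";
 *		...
 *		adi,channels {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			dma-channel@0 {
 *				reg = <0>;
 *				adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *				adi,source-bus-width = <64>;
 *				adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_FIFO>;
 *				adi,destination-bus-width = <64>;
 *			};
 *		};
 *	};
 */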

static int axi_dmac_read_chan_config(struct device *dev, struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;
	unsigned int val, desc;

	desc = axi_dmac_read(dmac, AXI_DMAC_REG_INTERFACE_DESC);
	if (desc == 0) {
		dev_err(dev, "DMA interface register reads zero\n");
		return -EFAULT;
	}

	val = AXI_DMAC_DMA_SRC_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid source bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->src_type = val;

	val = AXI_DMAC_DMA_DST_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid destination bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->dest_type = val;

	val = AXI_DMAC_DMA_SRC_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Source bus width is zero\n");
		return -EINVAL;
	}
	/* widths are stored in log2 */
	chan->src_width = 1 << val;

	val = AXI_DMAC_DMA_DST_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Destination bus width is zero\n");
		return -EINVAL;
	}
	chan->dest_width = 1 << val;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}

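/*
 * Capabilities are detected by writing to optional registers and reading the
 * values back: registers that back unimplemented features read as zero,
 * while implemented ones retain (some of) the written bits. This discovers
 * cyclic support, 2D support, the maximum transfer length and the length
 * alignment without a dedicated capabilities register.
 */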
static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
{
	struct axi_dmac_chan *chan = &dmac->chan;

	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	/* X_LENGTH holds (length - 1), so the readback is max length minus one */
	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;

	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Destination memory-mapped interface not supported.");
		return -ENODEV;
	}

	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Source memory-mapped interface not supported.");
		return -ENODEV;
	}

	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
		chan->hw_partial_xfer = true;

	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
		chan->length_align_mask =
			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	} else {
		chan->length_align_mask = chan->address_align_mask;
	}

	return 0;
}

static int axi_dmac_probe(struct platform_device *pdev)
{
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct regmap *regmap;
	unsigned int version;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	dmac->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

	if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
		ret = axi_dmac_read_chan_config(&pdev->dev, dmac);
	else
		ret = axi_dmac_parse_dt(&pdev->dev, dmac);

	if (ret < 0)
		goto err_clk_disable;

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = axi_dmac_detect_caps(dmac, version);
	if (ret)
		goto err_clk_disable;

	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	if (of_dma_is_coherent(pdev->dev.of_node)) {
		ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);

		if (version < ADI_AXI_PCORE_VER(4, 4, 'a') ||
		    !AXI_DMAC_DST_COHERENT_GET(ret)) {
			dev_err(dmac->dma_dev.dev,
				"Coherent DMA not supported in hardware");
			ret = -EINVAL;
			goto err_clk_disable;
		}
	}

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
		 &axi_dmac_regmap_config);
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	free_irq(dmac->irq, dmac);
err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");