// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008
 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
 *
 * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
 */

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma/ipu-dma.h>

#include "../dmaengine.h"
#include "ipu_intern.h"

#define FS_VF_IN_VALID	0x00000002
#define FS_ENC_IN_VALID	0x00000001

static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
			       bool wait_for_stop);

/*
 * There can be only one. We could allocate it dynamically, but then we'd have
 * to add an extra parameter to some functions, and use something as ugly as
 *	struct ipu *ipu = to_ipu(to_idmac(ichan->dma_chan.device));
 * in the ISR.
 */
static struct ipu ipu_data;

#define to_ipu(id) container_of(id, struct ipu, idmac)

static u32 __idmac_read_icreg(struct ipu *ipu, unsigned long reg)
{
	return __raw_readl(ipu->reg_ic + reg);
}

#define idmac_read_icreg(ipu, reg) __idmac_read_icreg(ipu, (reg) - IC_CONF)

static void __idmac_write_icreg(struct ipu *ipu, u32 value, unsigned long reg)
{
	__raw_writel(value, ipu->reg_ic + reg);
}

#define idmac_write_icreg(ipu, v, reg) __idmac_write_icreg(ipu, v, (reg) - IC_CONF)
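
/*
 * The IC register window (ipu->reg_ic) is mapped starting at the IC_CONF
 * register, so the idmac_*_icreg() helpers above subtract IC_CONF from the
 * absolute offsets (IC_CONF, IDMAC_CHA_EN, ...) before indexing into it.
 * The common IPU window (ipu->reg_ipu) needs no such adjustment.
 */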

static u32 idmac_read_ipureg(struct ipu *ipu, unsigned long reg)
{
	return __raw_readl(ipu->reg_ipu + reg);
}

static void idmac_write_ipureg(struct ipu *ipu, u32 value, unsigned long reg)
{
	__raw_writel(value, ipu->reg_ipu + reg);
}

/*****************************************************************************
 * IPU / IC common functions
 */
static void dump_idmac_reg(struct ipu *ipu)
{
	dev_dbg(ipu->dev, "IDMAC_CONF 0x%x, IC_CONF 0x%x, IDMAC_CHA_EN 0x%x, "
		"IDMAC_CHA_PRI 0x%x, IDMAC_CHA_BUSY 0x%x\n",
		idmac_read_icreg(ipu, IDMAC_CONF),
		idmac_read_icreg(ipu, IC_CONF),
		idmac_read_icreg(ipu, IDMAC_CHA_EN),
		idmac_read_icreg(ipu, IDMAC_CHA_PRI),
		idmac_read_icreg(ipu, IDMAC_CHA_BUSY));
	dev_dbg(ipu->dev, "BUF0_RDY 0x%x, BUF1_RDY 0x%x, CUR_BUF 0x%x, "
		"DB_MODE 0x%x, TASKS_STAT 0x%x\n",
		idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
		idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
		idmac_read_ipureg(ipu, IPU_CHA_CUR_BUF),
		idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL),
		idmac_read_ipureg(ipu, IPU_TASKS_STAT));
}

static uint32_t bytes_per_pixel(enum pixel_fmt fmt)
{
	switch (fmt) {
	case IPU_PIX_FMT_GENERIC:	/* generic data */
	case IPU_PIX_FMT_RGB332:
	case IPU_PIX_FMT_YUV420P:
	case IPU_PIX_FMT_YUV422P:
	default:
		return 1;
	case IPU_PIX_FMT_RGB565:
	case IPU_PIX_FMT_YUYV:
	case IPU_PIX_FMT_UYVY:
		return 2;
	case IPU_PIX_FMT_BGR24:
	case IPU_PIX_FMT_RGB24:
		return 3;
	case IPU_PIX_FMT_GENERIC_32:	/* generic data */
	case IPU_PIX_FMT_BGR32:
	case IPU_PIX_FMT_RGB32:
	case IPU_PIX_FMT_ABGR32:
		return 4;
	}
}

/* Enable direct write to memory by the Camera Sensor Interface */
static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
{
	uint32_t ic_conf, mask;

	switch (channel) {
	case IDMAC_IC_0:
		mask = IC_CONF_PRPENC_EN;
		break;
	case IDMAC_IC_7:
		mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
		break;
	default:
		return;
	}
	ic_conf = idmac_read_icreg(ipu, IC_CONF) | mask;
	idmac_write_icreg(ipu, ic_conf, IC_CONF);
}

/* Called under spin_lock_irqsave(&ipu_data.lock) */
static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel)
{
	uint32_t ic_conf, mask;

	switch (channel) {
	case IDMAC_IC_0:
		mask = IC_CONF_PRPENC_EN;
		break;
	case IDMAC_IC_7:
		mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
		break;
	default:
		return;
	}
	ic_conf = idmac_read_icreg(ipu, IC_CONF) & ~mask;
	idmac_write_icreg(ipu, ic_conf, IC_CONF);
}

static uint32_t ipu_channel_status(struct ipu *ipu, enum ipu_channel channel)
{
	uint32_t stat = TASK_STAT_IDLE;
	uint32_t task_stat_reg = idmac_read_ipureg(ipu, IPU_TASKS_STAT);

	switch (channel) {
	case IDMAC_IC_7:
		stat = (task_stat_reg & TSTAT_CSI2MEM_MASK) >>
			TSTAT_CSI2MEM_OFFSET;
		break;
	case IDMAC_IC_0:
	case IDMAC_SDC_0:
	case IDMAC_SDC_1:
	default:
		break;
	}
	return stat;
}

struct chan_param_mem_planar {
	/* Word 0 */
	u32	xv:10;
	u32	yv:10;
	u32	xb:12;

	u32	yb:12;
	u32	res1:2;
	u32	nsb:1;
	u32	lnpb:6;
	u32	ubo_l:11;

	u32	ubo_h:15;
	u32	vbo_l:17;

	u32	vbo_h:9;
	u32	res2:3;
	u32	fw:12;
	u32	fh_l:8;

	u32	fh_h:4;
	u32	res3:28;

	/* Word 1 */
	u32	eba0;

	u32	eba1;

	u32	bpp:3;
	u32	sl:14;
	u32	pfs:3;
	u32	bam:3;
	u32	res4:2;
	u32	npb:6;
	u32	res5:1;

	u32	sat:2;
	u32	res6:30;
} __attribute__ ((packed));

struct chan_param_mem_interleaved {
	/* Word 0 */
	u32	xv:10;
	u32	yv:10;
	u32	xb:12;

	u32	yb:12;
	u32	sce:1;
	u32	res1:1;
	u32	nsb:1;
	u32	lnpb:6;
	u32	sx:10;
	u32	sy_l:1;

	u32	sy_h:9;
	u32	ns:10;
	u32	sm:10;
	u32	sdx_l:3;

	u32	sdx_h:2;
	u32	sdy:5;
	u32	sdrx:1;
	u32	sdry:1;
	u32	sdr1:1;
	u32	res2:2;
	u32	fw:12;
	u32	fh_l:8;

	u32	fh_h:4;
	u32	res3:28;

	/* Word 1 */
	u32	eba0;

	u32	eba1;

	u32	bpp:3;
	u32	sl:14;
	u32	pfs:3;
	u32	bam:3;
	u32	res4:2;
	u32	npb:6;
	u32	res5:1;

	u32	sat:2;
	u32	scc:1;
	u32	ofs0:5;
	u32	ofs1:5;
	u32	ofs2:5;
	u32	ofs3:5;
	u32	wid0:3;
	u32	wid1:3;
	u32	wid2:3;

	u32	wid3:3;
	u32	dec_sel:1;
	u32	res6:28;
} __attribute__ ((packed));

union chan_param_mem {
	struct chan_param_mem_planar		pp;
	struct chan_param_mem_interleaved	ip;
};
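
/*
 * The planar and interleaved layouts deliberately overlap: the fields the
 * helpers below touch (fw/fh_l/fh_h/nsb in word 0, eba0/eba1/bpp/sl/pfs/
 * bam/npb/sat in word 1) sit at identical bit positions in both structs,
 * which is why those helpers can write through params->pp even when the
 * interleaved layout is in use.
 */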

static void ipu_ch_param_set_plane_offset(union chan_param_mem *params,
					  u32 u_offset, u32 v_offset)
{
	params->pp.ubo_l = u_offset & 0x7ff;
	params->pp.ubo_h = u_offset >> 11;
	params->pp.vbo_l = v_offset & 0x1ffff;
	params->pp.vbo_h = v_offset >> 17;
}
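
/*
 * Worked example (numbers purely illustrative): a 640x480 YUV420P frame
 * with a 640-byte stride gives u_offset = 640 * 480 = 307200 = 0x4b000,
 * which splits into ubo_l = 0x4b000 & 0x7ff = 0 and
 * ubo_h = 0x4b000 >> 11 = 150.
 */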

static void ipu_ch_param_set_size(union chan_param_mem *params,
				  uint32_t pixel_fmt, uint16_t width,
				  uint16_t height, uint16_t stride)
{
	u32 u_offset;
	u32 v_offset;

	params->pp.fw		= width - 1;
	params->pp.fh_l		= height - 1;
	params->pp.fh_h		= (height - 1) >> 8;
	params->pp.sl		= stride - 1;

	switch (pixel_fmt) {
	case IPU_PIX_FMT_GENERIC:
		/* Represents 8-bit Generic data */
		params->pp.bpp	= 3;
		params->pp.pfs	= 7;
		params->pp.npb	= 31;
		params->pp.sat	= 2;		/* SAT = use 32-bit access */
		break;
	case IPU_PIX_FMT_GENERIC_32:
		/* Represents 32-bit Generic data */
		params->pp.bpp	= 0;
		params->pp.pfs	= 7;
		params->pp.npb	= 7;
		params->pp.sat	= 2;		/* SAT = use 32-bit access */
		break;
	case IPU_PIX_FMT_RGB565:
		params->ip.bpp	= 2;
		params->ip.pfs	= 4;
		params->ip.npb	= 15;
		params->ip.sat	= 2;		/* SAT = 32-bit access */
		params->ip.ofs0	= 0;		/* Red bit offset */
		params->ip.ofs1	= 5;		/* Green bit offset */
		params->ip.ofs2	= 11;		/* Blue bit offset */
		params->ip.ofs3	= 16;		/* Alpha bit offset */
		params->ip.wid0	= 4;		/* Red bit width - 1 */
		params->ip.wid1	= 5;		/* Green bit width - 1 */
		params->ip.wid2	= 4;		/* Blue bit width - 1 */
		break;
	case IPU_PIX_FMT_BGR24:
		params->ip.bpp	= 1;		/* 24 BPP & RGB PFS */
		params->ip.pfs	= 4;
		params->ip.npb	= 7;
		params->ip.sat	= 2;		/* SAT = 32-bit access */
		params->ip.ofs0	= 0;		/* Red bit offset */
		params->ip.ofs1	= 8;		/* Green bit offset */
		params->ip.ofs2	= 16;		/* Blue bit offset */
		params->ip.ofs3	= 24;		/* Alpha bit offset */
		params->ip.wid0	= 7;		/* Red bit width - 1 */
		params->ip.wid1	= 7;		/* Green bit width - 1 */
		params->ip.wid2	= 7;		/* Blue bit width - 1 */
		break;
	case IPU_PIX_FMT_RGB24:
		params->ip.bpp	= 1;		/* 24 BPP & RGB PFS */
		params->ip.pfs	= 4;
		params->ip.npb	= 7;
		params->ip.sat	= 2;		/* SAT = 32-bit access */
		params->ip.ofs0	= 16;		/* Red bit offset */
		params->ip.ofs1	= 8;		/* Green bit offset */
		params->ip.ofs2	= 0;		/* Blue bit offset */
		params->ip.ofs3	= 24;		/* Alpha bit offset */
		params->ip.wid0	= 7;		/* Red bit width - 1 */
		params->ip.wid1	= 7;		/* Green bit width - 1 */
		params->ip.wid2	= 7;		/* Blue bit width - 1 */
		break;
	case IPU_PIX_FMT_BGRA32:
	case IPU_PIX_FMT_BGR32:
	case IPU_PIX_FMT_ABGR32:
		params->ip.bpp	= 0;
		params->ip.pfs	= 4;
		params->ip.npb	= 7;
		params->ip.sat	= 2;		/* SAT = 32-bit access */
		params->ip.ofs0	= 8;		/* Red bit offset */
		params->ip.ofs1	= 16;		/* Green bit offset */
		params->ip.ofs2	= 24;		/* Blue bit offset */
		params->ip.ofs3	= 0;		/* Alpha bit offset */
		params->ip.wid0	= 7;		/* Red bit width - 1 */
		params->ip.wid1	= 7;		/* Green bit width - 1 */
		params->ip.wid2	= 7;		/* Blue bit width - 1 */
		params->ip.wid3	= 7;		/* Alpha bit width - 1 */
		break;
	case IPU_PIX_FMT_RGBA32:
	case IPU_PIX_FMT_RGB32:
		params->ip.bpp	= 0;
		params->ip.pfs	= 4;
		params->ip.npb	= 7;
		params->ip.sat	= 2;		/* SAT = 32-bit access */
		params->ip.ofs0	= 24;		/* Red bit offset */
		params->ip.ofs1	= 16;		/* Green bit offset */
		params->ip.ofs2	= 8;		/* Blue bit offset */
		params->ip.ofs3	= 0;		/* Alpha bit offset */
		params->ip.wid0	= 7;		/* Red bit width - 1 */
		params->ip.wid1	= 7;		/* Green bit width - 1 */
		params->ip.wid2	= 7;		/* Blue bit width - 1 */
		params->ip.wid3	= 7;		/* Alpha bit width - 1 */
		break;
	case IPU_PIX_FMT_UYVY:
		params->ip.bpp	= 2;
		params->ip.pfs	= 6;
		params->ip.npb	= 7;
		params->ip.sat	= 2;		/* SAT = 32-bit access */
		break;
	case IPU_PIX_FMT_YUV420P2:
	case IPU_PIX_FMT_YUV420P:
		params->ip.bpp	= 3;
		params->ip.pfs	= 3;
		params->ip.npb	= 7;
		params->ip.sat	= 2;		/* SAT = 32-bit access */
		u_offset = stride * height;
		v_offset = u_offset + u_offset / 4;
		ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
		break;
	case IPU_PIX_FMT_YVU422P:
		params->ip.bpp	= 3;
		params->ip.pfs	= 2;
		params->ip.npb	= 7;
		params->ip.sat	= 2;		/* SAT = 32-bit access */
		v_offset = stride * height;
		u_offset = v_offset + v_offset / 2;
		ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
		break;
	case IPU_PIX_FMT_YUV422P:
		params->ip.bpp	= 3;
		params->ip.pfs	= 2;
		params->ip.npb	= 7;
		params->ip.sat	= 2;		/* SAT = 32-bit access */
		u_offset = stride * height;
		v_offset = u_offset + u_offset / 2;
		ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
		break;
	default:
		dev_err(ipu_data.dev,
			"mx3 ipu: unimplemented pixel format %d\n", pixel_fmt);
		break;
	}

	params->pp.nsb = 1;
}

static void ipu_ch_param_set_buffer(union chan_param_mem *params,
				    dma_addr_t buf0, dma_addr_t buf1)
{
	params->pp.eba0 = buf0;
	params->pp.eba1 = buf1;
}

static void ipu_ch_param_set_rotation(union chan_param_mem *params,
				      enum ipu_rotate_mode rotate)
{
	params->pp.bam = rotate;
}

static void ipu_write_param_mem(uint32_t addr, uint32_t *data,
				uint32_t num_words)
{
	for (; num_words > 0; num_words--) {
		dev_dbg(ipu_data.dev,
			"write param mem - addr = 0x%08X, data = 0x%08X\n",
			addr, *data);
		idmac_write_ipureg(&ipu_data, addr, IPU_IMA_ADDR);
		idmac_write_ipureg(&ipu_data, *data++, IPU_IMA_DATA);
		addr++;
		if ((addr & 0x7) == 5) {
			addr &= ~0x7;	/* set to word 0 */
			addr += 8;	/* increment to next row */
		}
	}
}
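
/*
 * IPU internal memory access (IMA) addresses count eight words per row, of
 * which this driver uses the first five.  Writing the ten words of a channel
 * descriptor starting at dma_param_addr(ch) thus hits base + 0 .. base + 4
 * (parameter word 0), then the wrap test above skips ahead to
 * base + 8 .. base + 12 (parameter word 1).
 */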

static int calc_resize_coeffs(uint32_t in_size, uint32_t out_size,
			      uint32_t *resize_coeff,
			      uint32_t *downsize_coeff)
{
	uint32_t temp_size;
	uint32_t temp_downsize;

	*resize_coeff	= 1 << 13;
	*downsize_coeff	= 1 << 13;

	/* Cannot downsize more than 8:1 */
	if (out_size << 3 < in_size)
		return -EINVAL;

	/* compute downsizing coefficient */
	temp_downsize = 0;
	temp_size = in_size;
	while (temp_size >= out_size * 2 && temp_downsize < 2) {
		temp_size >>= 1;
		temp_downsize++;
	}
	*downsize_coeff = temp_downsize;

	/*
	 * compute resizing coefficient using the following formula:
	 * resize_coeff = M * (SI - 1) / (SO - 1)
	 * where M = 2^13, SI = input size, SO = output size
	 */
	*resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1);
	if (*resize_coeff >= 16384L) {
		dev_err(ipu_data.dev, "Warning! Overflow on resize coeff.\n");
		*resize_coeff = 0x3FFF;
	}

	dev_dbg(ipu_data.dev, "resizing from %u -> %u pixels, "
		"downsize=%u, resize=%u.%lu (reg=%u)\n", in_size, out_size,
		*downsize_coeff, *resize_coeff >= 8192L ? 1 : 0,
		((*resize_coeff & 0x1FFF) * 10000L) / 8192L, *resize_coeff);

	return 0;
}
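
/*
 * Worked example (illustrative): scaling 640 -> 160 pixels.  The loop above
 * pre-downsizes 640 -> 320 -> 160 (downsize_coeff = 2, the maximum), leaving
 * temp_size = 160, so resize_coeff = 8192 * (160 - 1) / (160 - 1) = 8192,
 * i.e. exactly 1.0 in the 1.13 fixed-point format programmed into
 * IC_PRP_ENC_RSC below.
 */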

static enum ipu_color_space format_to_colorspace(enum pixel_fmt fmt)
{
	switch (fmt) {
	case IPU_PIX_FMT_RGB565:
	case IPU_PIX_FMT_BGR24:
	case IPU_PIX_FMT_RGB24:
	case IPU_PIX_FMT_BGR32:
	case IPU_PIX_FMT_RGB32:
		return IPU_COLORSPACE_RGB;
	default:
		return IPU_COLORSPACE_YCBCR;
	}
}

static int ipu_ic_init_prpenc(struct ipu *ipu,
			      union ipu_channel_param *params, bool src_is_csi)
{
	uint32_t reg, ic_conf;
	uint32_t downsize_coeff, resize_coeff;
	enum ipu_color_space in_fmt, out_fmt;

	/* Setup vertical resizing */
	calc_resize_coeffs(params->video.in_height,
			    params->video.out_height,
			    &resize_coeff, &downsize_coeff);
	reg = (downsize_coeff << 30) | (resize_coeff << 16);

	/* Setup horizontal resizing */
	calc_resize_coeffs(params->video.in_width,
			    params->video.out_width,
			    &resize_coeff, &downsize_coeff);
	reg |= (downsize_coeff << 14) | resize_coeff;

	/* Setup color space conversion */
	in_fmt = format_to_colorspace(params->video.in_pixel_fmt);
	out_fmt = format_to_colorspace(params->video.out_pixel_fmt);

	/*
	 * Colourspace conversion unsupported yet - see _init_csc() in
	 * Freescale sources
	 */
	if (in_fmt != out_fmt) {
		dev_err(ipu->dev, "Colourspace conversion unsupported!\n");
		return -EOPNOTSUPP;
	}

	idmac_write_icreg(ipu, reg, IC_PRP_ENC_RSC);

	ic_conf = idmac_read_icreg(ipu, IC_CONF);

	if (src_is_csi)
		ic_conf &= ~IC_CONF_RWS_EN;
	else
		ic_conf |= IC_CONF_RWS_EN;

	idmac_write_icreg(ipu, ic_conf, IC_CONF);

	return 0;
}

static uint32_t dma_param_addr(uint32_t dma_ch)
{
	/* Channel Parameter Memory */
	return 0x10000 | (dma_ch << 4);
}
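
/* For example, dma_ch = 7 maps to 0x10000 | (7 << 4) = 0x10070. */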

static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel,
				     bool prio)
{
	u32 reg = idmac_read_icreg(ipu, IDMAC_CHA_PRI);

	if (prio)
		reg |= 1UL << channel;
	else
		reg &= ~(1UL << channel);

	idmac_write_icreg(ipu, reg, IDMAC_CHA_PRI);

	dump_idmac_reg(ipu);
}

static uint32_t ipu_channel_conf_mask(enum ipu_channel channel)
{
	uint32_t mask;

	switch (channel) {
	case IDMAC_IC_0:
	case IDMAC_IC_7:
		mask = IPU_CONF_CSI_EN | IPU_CONF_IC_EN;
		break;
	case IDMAC_SDC_0:
	case IDMAC_SDC_1:
		mask = IPU_CONF_SDC_EN | IPU_CONF_DI_EN;
		break;
	default:
		mask = 0;
		break;
	}

	return mask;
}

/**
 * ipu_enable_channel() - enable an IPU channel.
 * @idmac:	IPU DMAC context.
 * @ichan:	IDMAC channel.
 * @return:	0 on success or negative error code on failure.
 */
static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
{
	struct ipu *ipu = to_ipu(idmac);
	enum ipu_channel channel = ichan->dma_chan.chan_id;
	uint32_t reg;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	/* Reset to buffer 0 */
	idmac_write_ipureg(ipu, 1UL << channel, IPU_CHA_CUR_BUF);
	ichan->active_buffer = 0;
	ichan->status = IPU_CHANNEL_ENABLED;

	switch (channel) {
	case IDMAC_SDC_0:
	case IDMAC_SDC_1:
	case IDMAC_IC_7:
		ipu_channel_set_priority(ipu, channel, true);
		break;
	default:
		break;
	}

	reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);

	idmac_write_icreg(ipu, reg | (1UL << channel), IDMAC_CHA_EN);

	ipu_ic_enable_task(ipu, channel);

	spin_unlock_irqrestore(&ipu->lock, flags);
	return 0;
}

/**
 * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
 * @ichan:	IDMAC channel.
 * @pixel_fmt:	pixel format of buffer. Pixel format is a FOURCC ASCII code.
 * @width:	width of buffer in pixels.
 * @height:	height of buffer in pixels.
 * @stride:	stride length of buffer in pixels.
 * @rot_mode:	rotation mode of buffer. A rotation setting other than
 *		IPU_ROTATE_VERT_FLIP should only be used for input buffers of
 *		rotation channels.
 * @phyaddr_0:	buffer 0 physical address.
 * @phyaddr_1:	buffer 1 physical address. Setting this to a non-zero value
 *		enables double buffering mode.
 * @return:	0 on success or negative error code on failure.
 */
static int ipu_init_channel_buffer(struct idmac_channel *ichan,
				   enum pixel_fmt pixel_fmt,
				   uint16_t width, uint16_t height,
				   uint32_t stride,
				   enum ipu_rotate_mode rot_mode,
				   dma_addr_t phyaddr_0, dma_addr_t phyaddr_1)
{
	enum ipu_channel channel = ichan->dma_chan.chan_id;
	struct idmac *idmac = to_idmac(ichan->dma_chan.device);
	struct ipu *ipu = to_ipu(idmac);
	union chan_param_mem params = {};
	unsigned long flags;
	uint32_t reg;
	uint32_t stride_bytes;

	stride_bytes = stride * bytes_per_pixel(pixel_fmt);

	if (stride_bytes % 4) {
		dev_err(ipu->dev,
			"Stride length must be 32-bit aligned, stride = %d, bytes = %d\n",
			stride, stride_bytes);
		return -EINVAL;
	}

	/* IC channel's stride must be a multiple of 8 pixels */
	if ((channel <= IDMAC_IC_13) && (stride % 8)) {
		dev_err(ipu->dev, "Stride must be 8 pixel multiple\n");
		return -EINVAL;
	}

	/* Build parameter memory data for DMA channel */
	ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
	ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
	ipu_ch_param_set_rotation(&params, rot_mode);

	spin_lock_irqsave(&ipu->lock, flags);

	ipu_write_param_mem(dma_param_addr(channel), (uint32_t *)&params, 10);

	reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);

	if (phyaddr_1)
		reg |= 1UL << channel;
	else
		reg &= ~(1UL << channel);

	idmac_write_ipureg(ipu, reg, IPU_CHA_DB_MODE_SEL);

	ichan->status = IPU_CHANNEL_READY;

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}

/**
 * ipu_select_buffer() - mark a channel's buffer as ready.
 * @channel:	channel ID.
 * @buffer_n:	buffer number to mark ready.
 */
static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
{
	/* No locking - this is a write-one-to-set register, cleared by IPU */
	if (buffer_n == 0)
		/* Mark buffer 0 as ready. */
		idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF0_RDY);
	else
		/* Mark buffer 1 as ready. */
		idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF1_RDY);
}

/**
 * ipu_update_channel_buffer() - update physical address of a channel buffer.
 * @ichan:	IDMAC channel.
 * @buffer_n:	buffer number to update.
 *		0 or 1 are the only valid values.
 * @phyaddr:	buffer physical address.
 */
/* Called under spin_lock(_irqsave)(&ichan->lock) */
static void ipu_update_channel_buffer(struct idmac_channel *ichan,
				      int buffer_n, dma_addr_t phyaddr)
{
	enum ipu_channel channel = ichan->dma_chan.chan_id;
	uint32_t reg;
	unsigned long flags;

	spin_lock_irqsave(&ipu_data.lock, flags);

	if (buffer_n == 0) {
		reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
		if (reg & (1UL << channel)) {
			ipu_ic_disable_task(&ipu_data, channel);
			ichan->status = IPU_CHANNEL_READY;
		}

		/* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */
		idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
				   0x0008UL, IPU_IMA_ADDR);
		idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
	} else {
		reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
		if (reg & (1UL << channel)) {
			ipu_ic_disable_task(&ipu_data, channel);
			ichan->status = IPU_CHANNEL_READY;
		}

		/* Check if double-buffering is already enabled */
		reg = idmac_read_ipureg(&ipu_data, IPU_CHA_DB_MODE_SEL);

		if (!(reg & (1UL << channel)))
			idmac_write_ipureg(&ipu_data, reg | (1UL << channel),
					   IPU_CHA_DB_MODE_SEL);

		/* 44.3.3.1.9 - Row Number 1 (WORD1, offset 1) */
		idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
				   0x0009UL, IPU_IMA_ADDR);
		idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
	}

	spin_unlock_irqrestore(&ipu_data.lock, flags);
}

/* Called under spin_lock_irqsave(&ichan->lock) */
static int ipu_submit_buffer(struct idmac_channel *ichan,
	struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx)
{
	unsigned int chan_id = ichan->dma_chan.chan_id;
	struct device *dev = &ichan->dma_chan.dev->device;

	if (async_tx_test_ack(&desc->txd))
		return -EINTR;

	/*
	 * On first invocation this shouldn't be necessary, the call to
	 * ipu_init_channel_buffer() above will set addresses for us, so we
	 * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
	 * doing it again shouldn't hurt either.
	 */
	ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));

	ipu_select_buffer(chan_id, buf_idx);
	dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
		sg, chan_id, buf_idx);

	return 0;
}

/* Called under spin_lock_irqsave(&ichan->lock) */
static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
				      struct idmac_tx_desc *desc)
{
	struct scatterlist *sg;
	int i, ret = 0;

	for (i = 0, sg = desc->sg; i < 2 && sg; i++) {
		if (!ichan->sg[i]) {
			ichan->sg[i] = sg;

			ret = ipu_submit_buffer(ichan, desc, sg, i);
			if (ret < 0)
				return ret;

			sg = sg_next(sg);
		}
	}

	return ret;
}

static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct idmac_tx_desc *desc = to_tx_desc(tx);
	struct idmac_channel *ichan = to_idmac_chan(tx->chan);
	struct idmac *idmac = to_idmac(tx->chan->device);
	struct ipu *ipu = to_ipu(idmac);
	struct device *dev = &ichan->dma_chan.dev->device;
	dma_cookie_t cookie;
	unsigned long flags;
	int ret;

	/* Sanity check */
	if (!list_empty(&desc->list)) {
		/* The descriptor doesn't belong to the client */
		dev_err(dev, "Descriptor %p not prepared!\n", tx);
		return -EBUSY;
	}

	mutex_lock(&ichan->chan_mutex);

	async_tx_clear_ack(tx);

	if (ichan->status < IPU_CHANNEL_READY) {
		struct idmac_video_param *video = &ichan->params.video;
		/*
		 * Initial buffer assignment - the first two sg-entries from
		 * the descriptor will end up in the IDMAC buffers
		 */
		dma_addr_t dma_1 = sg_is_last(desc->sg) ? 0 :
			sg_dma_address(&desc->sg[1]);

		WARN_ON(ichan->sg[0] || ichan->sg[1]);

		cookie = ipu_init_channel_buffer(ichan,
						 video->out_pixel_fmt,
						 video->out_width,
						 video->out_height,
						 video->out_stride,
						 IPU_ROTATE_NONE,
						 sg_dma_address(&desc->sg[0]),
						 dma_1);
		if (cookie < 0)
			goto out;
	}

	dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);

	cookie = dma_cookie_assign(tx);

	/* ipu->lock can be taken under ichan->lock, but not vice versa */
	spin_lock_irqsave(&ichan->lock, flags);

	list_add_tail(&desc->list, &ichan->queue);
	/* submit_buffers() atomically verifies and fills empty sg slots */
	ret = ipu_submit_channel_buffers(ichan, desc);

	spin_unlock_irqrestore(&ichan->lock, flags);

	if (ret < 0) {
		cookie = ret;
		goto dequeue;
	}

	if (ichan->status < IPU_CHANNEL_ENABLED) {
		ret = ipu_enable_channel(idmac, ichan);
		if (ret < 0) {
			cookie = ret;
			goto dequeue;
		}
	}

	dump_idmac_reg(ipu);

dequeue:
	if (cookie < 0) {
		spin_lock_irqsave(&ichan->lock, flags);
		list_del_init(&desc->list);
		spin_unlock_irqrestore(&ichan->lock, flags);
		tx->cookie = cookie;
		ichan->dma_chan.cookie = cookie;
	}

out:
	mutex_unlock(&ichan->chan_mutex);

	return cookie;
}

/* Called with ichan->chan_mutex held */
static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
{
	struct idmac_tx_desc *desc =
		vmalloc(array_size(n, sizeof(struct idmac_tx_desc)));
	struct idmac *idmac = to_idmac(ichan->dma_chan.device);

	if (!desc)
		return -ENOMEM;

	/* No interrupts, just disable the tasklet for a moment */
	tasklet_disable(&to_ipu(idmac)->tasklet);

	ichan->n_tx_desc = n;
	ichan->desc = desc;
	INIT_LIST_HEAD(&ichan->queue);
	INIT_LIST_HEAD(&ichan->free_list);

	while (n--) {
		struct dma_async_tx_descriptor *txd = &desc->txd;

		memset(txd, 0, sizeof(*txd));
		dma_async_tx_descriptor_init(txd, &ichan->dma_chan);
		txd->tx_submit		= idmac_tx_submit;

		list_add(&desc->list, &ichan->free_list);

		desc++;
	}

	tasklet_enable(&to_ipu(idmac)->tasklet);

	return 0;
}

/**
 * ipu_init_channel() - initialize an IPU channel.
 * @idmac:	IPU DMAC context.
 * @ichan:	pointer to the channel object.
 * @return:	0 on success or negative error code on failure.
 */
static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan)
{
	union ipu_channel_param *params = &ichan->params;
	uint32_t ipu_conf;
	enum ipu_channel channel = ichan->dma_chan.chan_id;
	unsigned long flags;
	uint32_t reg;
	struct ipu *ipu = to_ipu(idmac);
	int ret = 0, n_desc = 0;

	dev_dbg(ipu->dev, "init channel = %d\n", channel);

	if (channel != IDMAC_SDC_0 && channel != IDMAC_SDC_1 &&
	    channel != IDMAC_IC_7)
		return -EINVAL;

	spin_lock_irqsave(&ipu->lock, flags);

	switch (channel) {
	case IDMAC_IC_7:
		n_desc = 16;
		reg = idmac_read_icreg(ipu, IC_CONF);
		idmac_write_icreg(ipu, reg & ~IC_CONF_CSI_MEM_WR_EN, IC_CONF);
		break;
	case IDMAC_IC_0:
		n_desc = 16;
		reg = idmac_read_ipureg(ipu, IPU_FS_PROC_FLOW);
		idmac_write_ipureg(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW);
		ret = ipu_ic_init_prpenc(ipu, params, true);
		break;
	case IDMAC_SDC_0:
	case IDMAC_SDC_1:
		n_desc = 4;
		break;
	default:
		break;
	}

	ipu->channel_init_mask |= 1L << channel;

	/* Enable IPU sub module */
	ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) |
		ipu_channel_conf_mask(channel);
	idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);

	spin_unlock_irqrestore(&ipu->lock, flags);

	if (n_desc && !ichan->desc)
		ret = idmac_desc_alloc(ichan, n_desc);

	dump_idmac_reg(ipu);

	return ret;
}

/**
 * ipu_uninit_channel() - uninitialize an IPU channel.
 * @idmac:	IPU DMAC context.
 * @ichan:	pointer to the channel object.
 */
static void ipu_uninit_channel(struct idmac *idmac, struct idmac_channel *ichan)
{
	enum ipu_channel channel = ichan->dma_chan.chan_id;
	unsigned long flags;
	uint32_t reg;
	unsigned long chan_mask = 1UL << channel;
	uint32_t ipu_conf;
	struct ipu *ipu = to_ipu(idmac);

	spin_lock_irqsave(&ipu->lock, flags);

	if (!(ipu->channel_init_mask & chan_mask)) {
		dev_err(ipu->dev, "Channel already uninitialized %d\n",
			channel);
		spin_unlock_irqrestore(&ipu->lock, flags);
		return;
	}

	/* Reset the double buffer */
	reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
	idmac_write_ipureg(ipu, reg & ~chan_mask, IPU_CHA_DB_MODE_SEL);

	ichan->sec_chan_en = false;

	switch (channel) {
	case IDMAC_IC_7:
		reg = idmac_read_icreg(ipu, IC_CONF);
		idmac_write_icreg(ipu, reg & ~(IC_CONF_RWS_EN | IC_CONF_PRPENC_EN),
			     IC_CONF);
		break;
	case IDMAC_IC_0:
		reg = idmac_read_icreg(ipu, IC_CONF);
		idmac_write_icreg(ipu, reg & ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1),
				  IC_CONF);
		break;
	case IDMAC_SDC_0:
	case IDMAC_SDC_1:
	default:
		break;
	}

	ipu->channel_init_mask &= ~(1L << channel);

	ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) &
		~ipu_channel_conf_mask(channel);
	idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);

	spin_unlock_irqrestore(&ipu->lock, flags);

	ichan->n_tx_desc = 0;
	vfree(ichan->desc);
	ichan->desc = NULL;
}

/**
 * ipu_disable_channel() - disable an IPU channel.
 * @idmac:		IPU DMAC context.
 * @ichan:		channel object pointer.
 * @wait_for_stop:	flag to set whether to wait for channel end of frame or
 *			return immediately.
 * @return:		0 on success or negative error code on failure.
 */
static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
			       bool wait_for_stop)
{
	enum ipu_channel channel = ichan->dma_chan.chan_id;
	struct ipu *ipu = to_ipu(idmac);
	uint32_t reg;
	unsigned long flags;
	unsigned long chan_mask = 1UL << channel;
	unsigned int timeout;

	if (wait_for_stop && channel != IDMAC_SDC_1 && channel != IDMAC_SDC_0) {
		timeout = 40;
		/* This waiting always fails. Related to spurious irq problem */
		while ((idmac_read_icreg(ipu, IDMAC_CHA_BUSY) & chan_mask) ||
		       (ipu_channel_status(ipu, channel) == TASK_STAT_ACTIVE)) {
			timeout--;
			msleep(10);

			if (!timeout) {
				dev_dbg(ipu->dev,
					"Warning: timeout waiting for channel %u to "
					"stop: buf0_rdy = 0x%08X, buf1_rdy = 0x%08X, "
					"busy = 0x%08X, tstat = 0x%08X\n", channel,
					idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
					idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
					idmac_read_icreg(ipu, IDMAC_CHA_BUSY),
					idmac_read_ipureg(ipu, IPU_TASKS_STAT));
				break;
			}
		}
		dev_dbg(ipu->dev, "timeout = %d * 10ms\n", 40 - timeout);
	}
	/* SDC BG and FG must be disabled before DMA is disabled */
	if (wait_for_stop && (channel == IDMAC_SDC_0 ||
			      channel == IDMAC_SDC_1)) {
		for (timeout = 5;
		     timeout && !ipu_irq_status(ichan->eof_irq); timeout--)
			msleep(5);
	}

	spin_lock_irqsave(&ipu->lock, flags);

	/* Disable IC task */
	ipu_ic_disable_task(ipu, channel);

	/* Disable DMA channel(s) */
	reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
	idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}

static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan,
	struct idmac_tx_desc **desc, struct scatterlist *sg)
{
	struct scatterlist *sgnew = sg ? sg_next(sg) : NULL;

	if (sgnew)
		/* next sg-element in this list */
		return sgnew;

	if ((*desc)->list.next == &ichan->queue)
		/* No more descriptors on the queue */
		return NULL;

	/* Fetch next descriptor */
	*desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list);
	return (*desc)->sg;
}

/*
 * We have several possibilities here:
 * current BUF		next BUF
 *
 * not last sg		next not last sg
 * not last sg		next last sg
 * last sg		first sg from next descriptor
 * last sg		NULL
 *
 * Besides, the descriptor queue might be empty or not. We process all these
 * cases carefully.
 */
static irqreturn_t idmac_interrupt(int irq, void *dev_id)
{
	struct idmac_channel *ichan = dev_id;
	struct device *dev = &ichan->dma_chan.dev->device;
	unsigned int chan_id = ichan->dma_chan.chan_id;
	struct scatterlist **sg, *sgnext, *sgnew = NULL;
	/* Next transfer descriptor */
	struct idmac_tx_desc *desc, *descnew;
	bool done = false;
	u32 ready0, ready1, curbuf, err;
	unsigned long flags;
	struct dmaengine_desc_callback cb;

	/* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */

	dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer);

	spin_lock_irqsave(&ipu_data.lock, flags);

	ready0	= idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
	ready1	= idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
	curbuf	= idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
	err	= idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4);

	if (err & (1 << chan_id)) {
		idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);
		spin_unlock_irqrestore(&ipu_data.lock, flags);
		/*
		 * Doing this
		 * ichan->sg[0] = ichan->sg[1] = NULL;
		 * you can force channel re-enable on the next tx_submit(), but
		 * this is dirty - think about descriptors with multiple
		 * sg elements.
		 */
		dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n",
			 chan_id, ready0, ready1, curbuf);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&ipu_data.lock, flags);

	/* Other interrupts do not interfere with this channel */
	spin_lock(&ichan->lock);
	if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
		     (!ichan->active_buffer && (ready0 >> chan_id) & 1)
		     )) {
		spin_unlock(&ichan->lock);
		dev_dbg(dev,
			"IRQ with active buffer still ready on channel %x, "
			"active %d, ready %x, %x!\n", chan_id,
			ichan->active_buffer, ready0, ready1);
		return IRQ_NONE;
	}

	if (unlikely(list_empty(&ichan->queue))) {
		ichan->sg[ichan->active_buffer] = NULL;
		spin_unlock(&ichan->lock);
		dev_err(dev,
			"IRQ without queued buffers on channel %x, active %d, "
			"ready %x, %x!\n", chan_id,
			ichan->active_buffer, ready0, ready1);
		return IRQ_NONE;
	}

	/*
	 * active_buffer is a software flag, it shows which buffer we are
	 * currently expecting back from the hardware, IDMAC should be
	 * processing the other buffer already
	 */
	sg = &ichan->sg[ichan->active_buffer];
	sgnext = ichan->sg[!ichan->active_buffer];

	if (!*sg) {
		spin_unlock(&ichan->lock);
		return IRQ_HANDLED;
	}

	desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
	descnew = desc;

	dev_dbg(dev, "IDMAC irq %d, dma %#llx, next dma %#llx, current %d, curbuf %#x\n",
		irq, (u64)sg_dma_address(*sg),
		sgnext ? (u64)sg_dma_address(sgnext) : 0,
		ichan->active_buffer, curbuf);

	/* Find the descriptor of sgnext */
	sgnew = idmac_sg_next(ichan, &descnew, *sg);
	if (sgnext != sgnew)
		dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew);

	/*
	 * if sgnext == NULL sg must be the last element in a scatterlist and
	 * queue must be empty
	 */
	if (unlikely(!sgnext)) {
		if (!WARN_ON(sg_next(*sg)))
			dev_dbg(dev, "Underrun on channel %x\n", chan_id);
		ichan->sg[!ichan->active_buffer] = sgnew;

		if (unlikely(sgnew)) {
			ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer);
		} else {
			spin_lock_irqsave(&ipu_data.lock, flags);
			ipu_ic_disable_task(&ipu_data, chan_id);
			spin_unlock_irqrestore(&ipu_data.lock, flags);
			ichan->status = IPU_CHANNEL_READY;
			/* Continue to check for complete descriptor */
		}
	}

	/* Calculate and submit the next sg element */
	sgnew = idmac_sg_next(ichan, &descnew, sgnew);

	if (unlikely(!sg_next(*sg)) || !sgnext) {
		/*
		 * Last element in scatterlist done, remove from the queue,
		 * _init for debugging
		 */
		list_del_init(&desc->list);
		done = true;
	}

	*sg = sgnew;

	if (likely(sgnew) &&
	    ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
		dmaengine_desc_get_callback(&descnew->txd, &cb);

		list_del_init(&descnew->list);
		spin_unlock(&ichan->lock);

		dmaengine_desc_callback_invoke(&cb, NULL);
		spin_lock(&ichan->lock);
	}

	/* Flip the active buffer - even if update above failed */
	ichan->active_buffer = !ichan->active_buffer;
	if (done)
		dma_cookie_complete(&desc->txd);

	dmaengine_desc_get_callback(&desc->txd, &cb);

	spin_unlock(&ichan->lock);

	if (done && (desc->txd.flags & DMA_PREP_INTERRUPT))
		dmaengine_desc_callback_invoke(&cb, NULL);

	return IRQ_HANDLED;
}

static void ipu_gc_tasklet(struct tasklet_struct *t)
{
	struct ipu *ipu = from_tasklet(ipu, t, tasklet);
	int i;

	for (i = 0; i < IPU_CHANNELS_NUM; i++) {
		struct idmac_channel *ichan = ipu->channel + i;
		struct idmac_tx_desc *desc;
		unsigned long flags;
		struct scatterlist *sg;
		int j, k;

		for (j = 0; j < ichan->n_tx_desc; j++) {
			desc = ichan->desc + j;
			spin_lock_irqsave(&ichan->lock, flags);
			if (async_tx_test_ack(&desc->txd)) {
				list_move(&desc->list, &ichan->free_list);
				for_each_sg(desc->sg, sg, desc->sg_len, k) {
					if (ichan->sg[0] == sg)
						ichan->sg[0] = NULL;
					else if (ichan->sg[1] == sg)
						ichan->sg[1] = NULL;
				}
				async_tx_clear_ack(&desc->txd);
			}
			spin_unlock_irqrestore(&ichan->lock, flags);
		}
	}
}

/* Allocate and initialise a transfer descriptor. */
static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
		struct scatterlist *sgl, unsigned int sg_len,
		enum dma_transfer_direction direction, unsigned long tx_flags,
		void *context)
{
	struct idmac_channel *ichan = to_idmac_chan(chan);
	struct idmac_tx_desc *desc = NULL;
	struct dma_async_tx_descriptor *txd = NULL;
	unsigned long flags;

	/* We can only handle these three channels so far */
	if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 &&
	    chan->chan_id != IDMAC_IC_7)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
		return NULL;
	}

	mutex_lock(&ichan->chan_mutex);

	spin_lock_irqsave(&ichan->lock, flags);
	if (!list_empty(&ichan->free_list)) {
		desc = list_entry(ichan->free_list.next,
				  struct idmac_tx_desc, list);

		list_del_init(&desc->list);

		desc->sg_len	= sg_len;
		desc->sg	= sgl;
		txd		= &desc->txd;
		txd->flags	= tx_flags;
	}
	spin_unlock_irqrestore(&ichan->lock, flags);

	mutex_unlock(&ichan->chan_mutex);

	tasklet_schedule(&to_ipu(to_idmac(chan->device))->tasklet);

	return txd;
}
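
/*
 * Typical client usage (a sketch, not part of this driver): after requesting
 * one of the supported channels and filling in ichan->params.video, a client
 * drives the standard dmaengine flow.  "done_fn" is a hypothetical completion
 * callback supplied by the client:
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *				     DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = done_fn;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */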

/* Re-select the current buffer and re-activate the channel */
static void idmac_issue_pending(struct dma_chan *chan)
{
	struct idmac_channel *ichan = to_idmac_chan(chan);
	struct idmac *idmac = to_idmac(chan->device);
	struct ipu *ipu = to_ipu(idmac);
	unsigned long flags;

	/* This is not always needed, but doesn't hurt either */
	spin_lock_irqsave(&ipu->lock, flags);
	ipu_select_buffer(chan->chan_id, ichan->active_buffer);
	spin_unlock_irqrestore(&ipu->lock, flags);

	/*
	 * Might need to perform some parts of initialisation from
	 * ipu_enable_channel(), but not all, we do not want to reset to buffer
	 * 0, don't need to set priority again either, but re-enabling the task
	 * and the channel might be a good idea.
	 */
}

static int idmac_pause(struct dma_chan *chan)
{
	struct idmac_channel *ichan = to_idmac_chan(chan);
	struct idmac *idmac = to_idmac(chan->device);
	struct ipu *ipu = to_ipu(idmac);
	struct list_head *list, *tmp;
	unsigned long flags;

	mutex_lock(&ichan->chan_mutex);

	spin_lock_irqsave(&ipu->lock, flags);
	ipu_ic_disable_task(ipu, chan->chan_id);

	/* Return all descriptors into "prepared" state */
	list_for_each_safe(list, tmp, &ichan->queue)
		list_del_init(list);

	ichan->sg[0] = NULL;
	ichan->sg[1] = NULL;

	spin_unlock_irqrestore(&ipu->lock, flags);

	ichan->status = IPU_CHANNEL_INITIALIZED;

	mutex_unlock(&ichan->chan_mutex);

	return 0;
}

static int __idmac_terminate_all(struct dma_chan *chan)
{
	struct idmac_channel *ichan = to_idmac_chan(chan);
	struct idmac *idmac = to_idmac(chan->device);
	struct ipu *ipu = to_ipu(idmac);
	unsigned long flags;
	int i;

	ipu_disable_channel(idmac, ichan,
			    ichan->status >= IPU_CHANNEL_ENABLED);

	tasklet_disable(&ipu->tasklet);

	/* ichan->queue is modified in ISR, have to spinlock */
	spin_lock_irqsave(&ichan->lock, flags);
	list_splice_init(&ichan->queue, &ichan->free_list);

	if (ichan->desc)
		for (i = 0; i < ichan->n_tx_desc; i++) {
			struct idmac_tx_desc *desc = ichan->desc + i;
			if (list_empty(&desc->list))
				/* Descriptor was prepared, but not submitted */
				list_add(&desc->list, &ichan->free_list);

			async_tx_clear_ack(&desc->txd);
		}

	ichan->sg[0] = NULL;
	ichan->sg[1] = NULL;
	spin_unlock_irqrestore(&ichan->lock, flags);

	tasklet_enable(&ipu->tasklet);

	ichan->status = IPU_CHANNEL_INITIALIZED;

	return 0;
}

static int idmac_terminate_all(struct dma_chan *chan)
{
	struct idmac_channel *ichan = to_idmac_chan(chan);
	int ret;

	mutex_lock(&ichan->chan_mutex);

	ret = __idmac_terminate_all(chan);

	mutex_unlock(&ichan->chan_mutex);

	return ret;
}

#ifdef DEBUG
static irqreturn_t ic_sof_irq(int irq, void *dev_id)
{
	struct idmac_channel *ichan = dev_id;
	printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n",
	       irq, ichan->dma_chan.chan_id);
	disable_irq_nosync(irq);
	return IRQ_HANDLED;
}

static irqreturn_t ic_eof_irq(int irq, void *dev_id)
{
	struct idmac_channel *ichan = dev_id;
	printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n",
	       irq, ichan->dma_chan.chan_id);
	disable_irq_nosync(irq);
	return IRQ_HANDLED;
}

static int ic_sof = -EINVAL, ic_eof = -EINVAL;
#endif

static int idmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct idmac_channel *ichan = to_idmac_chan(chan);
	struct idmac *idmac = to_idmac(chan->device);
	int ret;

	/* dmaengine.c now guarantees to only offer free channels */
	BUG_ON(chan->client_count > 1);
	WARN_ON(ichan->status != IPU_CHANNEL_FREE);

	dma_cookie_init(chan);

	ret = ipu_irq_map(chan->chan_id);
	if (ret < 0)
		goto eimap;

	ichan->eof_irq = ret;

	/*
	 * Important to first disable the channel, because maybe someone
	 * used it before us, e.g., the bootloader
	 */
	ipu_disable_channel(idmac, ichan, true);

	ret = ipu_init_channel(idmac, ichan);
	if (ret < 0)
		goto eichan;

	ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
			  ichan->eof_name, ichan);
	if (ret < 0)
		goto erirq;

#ifdef DEBUG
	if (chan->chan_id == IDMAC_IC_7) {
		ic_sof = ipu_irq_map(69);
		if (ic_sof > 0) {
			ret = request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan);
			if (ret)
				dev_err(&chan->dev->device, "request irq failed for IC SOF\n");
		}
		ic_eof = ipu_irq_map(70);
		if (ic_eof > 0) {
			ret = request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan);
			if (ret)
				dev_err(&chan->dev->device, "request irq failed for IC EOF\n");
		}
	}
#endif

	ichan->status = IPU_CHANNEL_INITIALIZED;

	dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n",
		chan->chan_id, ichan->eof_irq);

	return ret;

erirq:
	ipu_uninit_channel(idmac, ichan);
eichan:
	ipu_irq_unmap(chan->chan_id);
eimap:
	return ret;
}

static void idmac_free_chan_resources(struct dma_chan *chan)
{
	struct idmac_channel *ichan = to_idmac_chan(chan);
	struct idmac *idmac = to_idmac(chan->device);

	mutex_lock(&ichan->chan_mutex);

	__idmac_terminate_all(chan);

	if (ichan->status > IPU_CHANNEL_FREE) {
#ifdef DEBUG
		if (chan->chan_id == IDMAC_IC_7) {
			if (ic_sof > 0) {
				free_irq(ic_sof, ichan);
				ipu_irq_unmap(69);
				ic_sof = -EINVAL;
			}
			if (ic_eof > 0) {
				free_irq(ic_eof, ichan);
				ipu_irq_unmap(70);
				ic_eof = -EINVAL;
			}
		}
#endif
		free_irq(ichan->eof_irq, ichan);
		ipu_irq_unmap(chan->chan_id);
	}

	ichan->status = IPU_CHANNEL_FREE;

	ipu_uninit_channel(idmac, ichan);

	mutex_unlock(&ichan->chan_mutex);

	tasklet_schedule(&to_ipu(idmac)->tasklet);
}

static enum dma_status idmac_tx_status(struct dma_chan *chan,
		       dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static int __init ipu_idmac_init(struct ipu *ipu)
{
	struct idmac *idmac = &ipu->idmac;
	struct dma_device *dma = &idmac->dma;
	int i;

	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	/* Compulsory common fields */
	dma->dev				= ipu->dev;
	dma->device_alloc_chan_resources	= idmac_alloc_chan_resources;
	dma->device_free_chan_resources		= idmac_free_chan_resources;
	dma->device_tx_status			= idmac_tx_status;
	dma->device_issue_pending		= idmac_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	dma->device_prep_slave_sg		= idmac_prep_slave_sg;
	dma->device_pause			= idmac_pause;
	dma->device_terminate_all		= idmac_terminate_all;

	INIT_LIST_HEAD(&dma->channels);
	for (i = 0; i < IPU_CHANNELS_NUM; i++) {
		struct idmac_channel *ichan = ipu->channel + i;
		struct dma_chan *dma_chan = &ichan->dma_chan;

		spin_lock_init(&ichan->lock);
		mutex_init(&ichan->chan_mutex);

		ichan->status		= IPU_CHANNEL_FREE;
		ichan->sec_chan_en	= false;
		snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i);

		dma_chan->device	= &idmac->dma;
		dma_cookie_init(dma_chan);
		dma_chan->chan_id	= i;
		list_add_tail(&dma_chan->device_node, &dma->channels);
	}

	idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF);

	return dma_async_device_register(&idmac->dma);
}

static void ipu_idmac_exit(struct ipu *ipu)
{
	int i;
	struct idmac *idmac = &ipu->idmac;

	for (i = 0; i < IPU_CHANNELS_NUM; i++) {
		struct idmac_channel *ichan = ipu->channel + i;

		idmac_terminate_all(&ichan->dma_chan);
	}

	dma_async_device_unregister(&idmac->dma);
}

/*****************************************************************************
 * IPU common probe / remove
 */

static int __init ipu_probe(struct platform_device *pdev)
{
	struct resource *mem_ipu, *mem_ic;
	int ret;

	spin_lock_init(&ipu_data.lock);

	mem_ipu	= platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem_ic	= platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!mem_ipu || !mem_ic)
		return -EINVAL;

	ipu_data.dev = &pdev->dev;

	platform_set_drvdata(pdev, &ipu_data);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_noirq;

	ipu_data.irq_fn = ret;
	ret = platform_get_irq(pdev, 1);
	if (ret < 0)
		goto err_noirq;

	ipu_data.irq_err = ret;

	dev_dbg(&pdev->dev, "fn irq %u, err irq %u\n",
		ipu_data.irq_fn, ipu_data.irq_err);

	/* Remap IPU common registers */
	ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
	if (!ipu_data.reg_ipu) {
		ret = -ENOMEM;
		goto err_ioremap_ipu;
	}

	/* Remap Image Converter and Image DMA Controller registers */
	ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic));
	if (!ipu_data.reg_ic) {
		ret = -ENOMEM;
		goto err_ioremap_ic;
	}

	/* Get IPU clock */
	ipu_data.ipu_clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(ipu_data.ipu_clk)) {
		ret = PTR_ERR(ipu_data.ipu_clk);
		goto err_clk_get;
	}

	/* Make sure IPU HSP clock is running */
	clk_prepare_enable(ipu_data.ipu_clk);

	/* Disable all interrupts */
	idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1);
	idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_2);
	idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_3);
	idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_4);
	idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_5);

	dev_dbg(&pdev->dev, "%s @ 0x%08lx, fn irq %u, err irq %u\n", pdev->name,
		(unsigned long)mem_ipu->start, ipu_data.irq_fn, ipu_data.irq_err);

	ret = ipu_irq_attach_irq(&ipu_data, pdev);
	if (ret < 0)
		goto err_attach_irq;

	/* Initialize DMA engine */
	ret = ipu_idmac_init(&ipu_data);
	if (ret < 0)
		goto err_idmac_init;

	tasklet_setup(&ipu_data.tasklet, ipu_gc_tasklet);

	ipu_data.dev = &pdev->dev;

	dev_dbg(ipu_data.dev, "IPU initialized\n");

	return 0;

err_idmac_init:
err_attach_irq:
	ipu_irq_detach_irq(&ipu_data, pdev);
	clk_disable_unprepare(ipu_data.ipu_clk);
	clk_put(ipu_data.ipu_clk);
err_clk_get:
	iounmap(ipu_data.reg_ic);
err_ioremap_ic:
	iounmap(ipu_data.reg_ipu);
err_ioremap_ipu:
err_noirq:
	dev_err(&pdev->dev, "Failed to probe IPU: %d\n", ret);
	return ret;
}

static int ipu_remove(struct platform_device *pdev)
{
	struct ipu *ipu = platform_get_drvdata(pdev);

	ipu_idmac_exit(ipu);
	ipu_irq_detach_irq(ipu, pdev);
	clk_disable_unprepare(ipu->ipu_clk);
	clk_put(ipu->ipu_clk);
	iounmap(ipu->reg_ic);
	iounmap(ipu->reg_ipu);
	tasklet_kill(&ipu->tasklet);

	return 0;
}

/*
 * We need two MEM resources - with IPU-common and Image Converter registers,
 * including PF_CONF and IDMAC_* registers, and two IRQs - function and error
 */
static struct platform_driver ipu_platform_driver = {
	.driver = {
		.name	= "ipu-core",
	},
	.remove		= ipu_remove,
};

static int __init ipu_init(void)
{
	return platform_driver_probe(&ipu_platform_driver, ipu_probe);
}
subsys_initcall(ipu_init);

MODULE_DESCRIPTION("IPU core driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
MODULE_ALIAS("platform:ipu-core");