// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH FLCTL NAND controller
 *
 * Copyright (c) 2008 Renesas Solutions Corp.
 * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
 *
 * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sh_flctl.h>

static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = 0;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = 12;
	oobregion->length = 4;

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
	.ecc = flctl_4secc_ooblayout_sp_ecc,
	.free = flctl_4secc_ooblayout_sp_free,
};

static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * 16) + 6;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = section * 16;
	oobregion->length = 6;

	if (!section) {
		oobregion->offset += 2;
		oobregion->length -= 2;
	}

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
	.ecc = flctl_4secc_ooblayout_lp_ecc,
	.free = flctl_4secc_ooblayout_lp_free,
};

static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

static struct nand_bbt_descr flctl_4secc_smallpage = {
	.offs = 11,
	.len = 1,
	.pattern = scan_ff_pattern,
};

static struct nand_bbt_descr flctl_4secc_largepage = {
	.offs = 0,
	.len = 2,
	.pattern = scan_ff_pattern,
};

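/* Flush both AC FIFOs by pulsing the AC1CLR/AC0CLR bits in FLINTDMACR */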
static void empty_fifo(struct sh_flctl *flctl)
{
	writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
}

static void start_translation(struct sh_flctl *flctl)
{
	writeb(TRSTRT, FLTRCR(flctl));
}

static void timeout_error(struct sh_flctl *flctl, const char *str)
{
	dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
}

static void wait_completion(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		if (readb(FLTRCR(flctl)) & TREND) {
			writeb(0x0, FLTRCR(flctl));
			return;
		}
		udelay(1);
	}

	timeout_error(flctl, __func__);
	writeb(0x0, FLTRCR(flctl));
}

static void flctl_dma_complete(void *param)
{
	struct sh_flctl *flctl = param;

	complete(&flctl->dma_complete);
}

static void flctl_release_dma(struct sh_flctl *flctl)
{
	if (flctl->chan_fifo0_rx) {
		dma_release_channel(flctl->chan_fifo0_rx);
		flctl->chan_fifo0_rx = NULL;
	}
	if (flctl->chan_fifo0_tx) {
		dma_release_channel(flctl->chan_fifo0_tx);
		flctl->chan_fifo0_tx = NULL;
	}
}

static void flctl_setup_dma(struct sh_flctl *flctl)
{
	dma_cap_mask_t mask;
	struct dma_slave_config cfg;
	struct platform_device *pdev = flctl->pdev;
	struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	if (!pdata)
		return;

	if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
		return;

	/* We can only use DMA for both Tx and Rx, or not at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_tx);
	dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
		flctl->chan_fifo0_tx);

	if (!flctl->chan_fifo0_tx)
		return;

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = flctl->fifo;
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
	if (ret < 0)
		goto err;

	flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_rx);
	dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
		flctl->chan_fifo0_rx);

	if (!flctl->chan_fifo0_rx)
		goto err;

	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = flctl->fifo;
	ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
	if (ret < 0)
		goto err;

	init_completion(&flctl->dma_complete);

	return;

err:
	flctl_release_dma(flctl);
}

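/*
 * Program FLADR (and FLADR2 on devices larger than 128MB) with the
 * column/page encoding expected for the current page size: for 2KiB pages
 * the column occupies bits 0-11 and the page number starts at bit 16, for
 * 512B pages the column is one byte and the page number starts at bit 8.
 */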
static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t addr = 0;

	if (column == -1) {
		addr = page_addr;	/* ERASE1 */
	} else if (page_addr != -1) {
		/* SEQIN, READ0, etc. */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		if (flctl->page_size) {
			addr = column & 0x0FFF;
			addr |= (page_addr & 0xff) << 16;
			addr |= ((page_addr >> 8) & 0xff) << 24;
			/* larger than 128MB */
			if (flctl->rw_ADRCNT == ADRCNT2_E) {
				uint32_t addr2;

				addr2 = (page_addr >> 16) & 0xff;
				writel(addr2, FLADR2(flctl));
			}
		} else {
			addr = column;
			addr |= (page_addr & 0xff) << 8;
			addr |= ((page_addr >> 8) & 0xff) << 16;
			addr |= ((page_addr >> 16) & 0xff) << 24;
		}
	}
	writel(addr, FLADR(flctl));
}

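/*
 * FLDTCNTR reports the FIFO fill levels: bits 16-23 for the data FIFO and
 * bits 24-31 for the ECC FIFO. The pollers below spin on those fields with
 * a 1us delay per iteration until data/space is available or they time out.
 */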
static void wait_rfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		uint32_t val;
		/* check FIFO */
		val = readl(FLDTCNTR(flctl)) >> 16;
		if (val & 0xFF)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static void wait_wfifo_ready(struct sh_flctl *flctl)
{
	uint32_t len, timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		/* check FIFO */
		len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static enum flctl_ecc_res_t wait_recfifo_ready
		(struct sh_flctl *flctl, int sector_number)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	void __iomem *ecc_reg[4];
	int i;
	int state = FL_SUCCESS;
	uint32_t data, size;

	/*
	 * First this loop checks in FLDTCNTR if we are ready to read out the
	 * oob data. This is the case if either all went fine without errors,
	 * or if the bottom part of the loop corrected the errors or marked
	 * them as uncorrectable and the controller has been given time to
	 * push the data into the FIFO.
	 */
	while (timeout--) {
		/* check if all is ok and we can read out the OOB */
		size = readl(FLDTCNTR(flctl)) >> 24;
		if ((size & 0xFF) == 4)
			return state;

		/* check if a correction code has been calculated */
		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
			/*
			 * either we wait for the fifo to be filled or a
			 * correction pattern is being generated
			 */
			udelay(1);
			continue;
		}

		/* check for an uncorrectable error */
		if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
			/* check if we face a non-empty page */
			for (i = 0; i < 512; i++) {
				if (flctl->done_buff[i] != 0xff) {
					state = FL_ERROR; /* can't correct */
					break;
				}
			}

			if (state == FL_SUCCESS)
				dev_dbg(&flctl->pdev->dev,
					"reading empty sector %d, ecc error ignored\n",
					sector_number);

			writel(0, FL4ECCCR(flctl));
			continue;
		}

		/* start error correction */
		ecc_reg[0] = FL4ECCRESULT0(flctl);
		ecc_reg[1] = FL4ECCRESULT1(flctl);
		ecc_reg[2] = FL4ECCRESULT2(flctl);
		ecc_reg[3] = FL4ECCRESULT3(flctl);

		for (i = 0; i < 3; i++) {
			uint8_t org;
			unsigned int index;

			data = readl(ecc_reg[i]);

			if (flctl->page_size)
				index = (512 * sector_number) +
					(data >> 16);
			else
				index = data >> 16;

			org = flctl->done_buff[index];
			flctl->done_buff[index] = org ^ (data & 0xFF);
		}
		state = FL_REPAIRABLE;
		writel(0, FL4ECCCR(flctl));
	}

	timeout_error(flctl, __func__);
	return FL_TIMEOUT;
}

static void wait_wecfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	uint32_t len;

	while (timeout--) {
		/* check FLECFIFO */
		len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

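/*
 * Transfer @len bytes between @buf and FIFO0 via the dmaengine API.
 * Returns 0 on success and a negative error code otherwise, in which case
 * the caller falls back to PIO.
 */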
static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
					int len, enum dma_data_direction dir)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan;
	enum dma_transfer_direction tr_dir;
	dma_addr_t dma_addr;
	dma_cookie_t cookie;
	uint32_t reg;
	int ret = 0;
	unsigned long time_left;

	if (dir == DMA_FROM_DEVICE) {
		chan = flctl->chan_fifo0_rx;
		tr_dir = DMA_DEV_TO_MEM;
	} else {
		chan = flctl->chan_fifo0_tx;
		tr_dir = DMA_MEM_TO_DEV;
	}

	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);

	if (!dma_mapping_error(chan->device->dev, dma_addr))
		desc = dmaengine_prep_slave_single(chan, dma_addr, len,
			tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (desc) {
		reg = readl(FLINTDMACR(flctl));
		reg |= DREQ0EN;
		writel(reg, FLINTDMACR(flctl));

		desc->callback = flctl_dma_complete;
		desc->callback_param = flctl;
		cookie = dmaengine_submit(desc);
		ret = dma_submit_error(cookie);
		if (ret) {
			dev_warn(&flctl->pdev->dev,
				 "DMA submit failed, falling back to PIO\n");
			goto out;
		}

		dma_async_issue_pending(chan);
	} else {
		/* DMA failed, fall back to PIO */
		flctl_release_dma(flctl);
		dev_warn(&flctl->pdev->dev,
			 "DMA failed, falling back to PIO\n");
		ret = -EIO;
		goto out;
	}

	time_left = wait_for_completion_timeout(&flctl->dma_complete,
						msecs_to_jiffies(3000));

	if (time_left == 0) {
		dmaengine_terminate_all(chan);
		dev_err(&flctl->pdev->dev, "DMA transfer timed out\n");
		ret = -ETIMEDOUT;
	}

out:
	reg = readl(FLINTDMACR(flctl));
	reg &= ~DREQ0EN;
	writel(reg, FLINTDMACR(flctl));

	dma_unmap_single(chan->device->dev, dma_addr, len, dir);

	/* ret == 0 is success */
	return ret;
}

static void read_datareg(struct sh_flctl *flctl, int offset)
{
	unsigned long data;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	wait_completion(flctl);

	data = readl(FLDATAR(flctl));
	*buf = le32_to_cpu(data);
}

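/*
 * Read rlen bytes from the data FIFO into done_buff at @offset. DMA is
 * attempted for transfers of 32 bytes or more; otherwise (or on DMA
 * failure) the FIFO is drained with polled 32-bit reads. The FIFO data is
 * big-endian and is byte-swapped into CPU order afterwards.
 */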
static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_rx && rlen >= 32 &&
	    !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE))
		goto convert;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_rfifo_ready(flctl);
		buf[i] = readl(FLDTFIFO(flctl));
	}

convert:
	for (i = 0; i < len_4align; i++)
		buf[i] = be32_to_cpu(buf[i]);
}

static enum flctl_ecc_res_t read_ecfiforeg
		(struct sh_flctl *flctl, uint8_t *buff, int sector)
{
	int i;
	enum flctl_ecc_res_t res;
	unsigned long *ecc_buf = (unsigned long *)buff;

	res = wait_recfifo_ready(flctl, sector);

	if (res != FL_ERROR) {
		for (i = 0; i < 4; i++) {
			ecc_buf[i] = readl(FLECFIFO(flctl));
			ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
		}
	}

	return res;
}

static void write_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;
	for (i = 0; i < len_4align; i++) {
		wait_wfifo_ready(flctl);
		writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
	}
}

static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	for (i = 0; i < len_4align; i++)
		buf[i] = cpu_to_be32(buf[i]);

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_tx && rlen >= 32 &&
	    !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE))
		return;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_wecfifo_ready(flctl);
		writel(buf[i], FLECFIFO(flctl));
	}
}

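/*
 * Translate a raw NAND command into FLCMNCR/FLCMDCR/FLCMDCR settings and
 * write them out. @flcmcdr_val packs the up to two command bytes that are
 * issued on the bus (second command in bits 8-15, first in bits 0-7).
 */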
static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
	uint32_t flcmdcr_val, addr_len_bytes = 0;

	/* Set SNAND bit if page size is 2048 bytes */
	if (flctl->page_size)
		flcmncr_val |= SNAND_E;
	else
		flcmncr_val &= ~SNAND_E;

	/* default FLCMDCR val */
	flcmdcr_val = DOCMD1_E | DOADR_E;

	/* Set for FLCMDCR */
	switch (cmd) {
	case NAND_CMD_ERASE1:
		addr_len_bytes = flctl->erase_ADRCNT;
		flcmdcr_val |= DOCMD2_E;
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_RNDOUT:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= CDSRC_E;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_SEQIN:
		/* here flcmcdr_val holds the read command (READ0, READ1 or READOOB) */
		flcmdcr_val &= ~DOADR_E;	/* ONLY execute 1st cmd */
		break;
	case NAND_CMD_PAGEPROG:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_READID:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val |= CDSRC_E;
		addr_len_bytes = ADRCNT_1;
		break;
	case NAND_CMD_STATUS:
	case NAND_CMD_RESET:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val &= ~(DOADR_E | DOSR_E);
		break;
	default:
		break;
	}

	/* Set address bytes parameter */
	flcmdcr_val |= addr_len_bytes;

	/* Now actually write */
	writel(flcmncr_val, FLCMNCR(flctl));
	writel(flcmdcr_val, FLCMDCR(flctl));
	writel(flcmcdr_val, FLCMCDR(flctl));
}

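/*
 * With hardware ECC the controller corrects the data while it is pulled
 * into done_buff (see execmd_read_page_sector()), so these page hooks only
 * shuttle the already-buffered data through the legacy accessors.
 */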
static int flctl_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (oob_required)
		chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
	return 0;
}

static int flctl_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
	return nand_prog_page_end_op(chip);
}

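/*
 * Read a full page in sector access mode: each 512-byte sector is pulled
 * from the data FIFO, followed by its 16 OOB bytes from the ECC FIFO,
 * while the 4-symbol ECC engine corrects the data in place.
 */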
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int sector, page_sectors;
	enum flctl_ecc_res_t ecc_result;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
		FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));

	empty_fifo(flctl);
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		read_fiforeg(flctl, 512, 512 * sector);

		ecc_result = read_ecfiforeg(flctl,
			&flctl->done_buff[mtd->writesize + 16 * sector],
			sector);

		switch (ecc_result) {
		case FL_REPAIRABLE:
			dev_info(&flctl->pdev->dev,
				"applied ecc on page 0x%x\n", page_addr);
			mtd->ecc_stats.corrected++;
			break;
		case FL_ERROR:
			dev_warn(&flctl->pdev->dev,
				"page 0x%x contains corrupted data\n",
				page_addr);
			mtd->ecc_stats.failed++;
			break;
		default:
			break;
		}
	}

	wait_completion(flctl);

	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
			FLCMNCR(flctl));
}

static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_sectors = flctl->page_size ? 4 : 1;
	int i;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	empty_fifo(flctl);

	for (i = 0; i < page_sectors; i++) {
		set_addr(mtd, (512 + 16) * i + 512, page_addr);
		writel(16, FLDTCNTR(flctl));

		start_translation(flctl);
		read_fiforeg(flctl, 16, 16 * i);
		wait_completion(flctl);
	}
}

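/*
 * Write a full page in sector access mode: 512 data bytes per sector go
 * through the data FIFO, the 16 OOB bytes per sector through the ECC FIFO.
 */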
static void execmd_write_page_sector(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	empty_fifo(flctl);
	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		write_fiforeg(flctl, 512, 512 * sector);
		write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
	}

	wait_completion(flctl);
	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
}

static void execmd_write_oob(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	for (sector = 0; sector < page_sectors; sector++) {
		empty_fifo(flctl);
		set_addr(mtd, sector * 528 + 512, page_addr);
		writel(16, FLDTCNTR(flctl));	/* set transfer size */

		start_translation(flctl);
		write_fiforeg(flctl, 16, 16 * sector);
		wait_completion(flctl);
	}
}

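/*
 * Legacy cmdfunc dispatcher. Read-type commands stage their result in
 * done_buff, from which the read_byte()/read_buf() hooks are served;
 * SEQIN only records the target address, the actual programming happens
 * when PAGEPROG arrives.
 */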
static void flctl_cmdfunc(struct nand_chip *chip, unsigned int command,
			int column, int page_addr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t read_cmd = 0;

	pm_runtime_get_sync(&flctl->pdev->dev);

	flctl->read_bytes = 0;
	if (command != NAND_CMD_PAGEPROG)
		flctl->index = 0;

	switch (command) {
	case NAND_CMD_READ1:
	case NAND_CMD_READ0:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_page_sector(mtd, page_addr);
			break;
		}
		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, 0, page_addr);

		flctl->read_bytes = mtd->writesize + mtd->oobsize;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		flctl->index += column;
		goto read_normal_exit;

	case NAND_CMD_READOOB:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_oob(mtd, page_addr);
			break;
		}

		if (flctl->page_size) {
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| NAND_CMD_READ0);
			set_addr(mtd, mtd->writesize, page_addr);
		} else {
			set_cmd_regs(mtd, command, command);
			set_addr(mtd, 0, page_addr);
		}
		flctl->read_bytes = mtd->oobsize;
		goto read_normal_exit;

	case NAND_CMD_RNDOUT:
		if (flctl->hwecc)
			break;

		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, column, 0);

		flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
		goto read_normal_exit;

	case NAND_CMD_READID:
		set_cmd_regs(mtd, command, command);

		/* READID is always performed using an 8-bit bus */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column <<= 1;
		set_addr(mtd, column, 0);

		flctl->read_bytes = 8;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		empty_fifo(flctl);
		start_translation(flctl);
		read_fiforeg(flctl, flctl->read_bytes, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_ERASE1:
		flctl->erase1_page_addr = page_addr;
		break;

	case NAND_CMD_ERASE2:
		set_cmd_regs(mtd, NAND_CMD_ERASE1,
			(command << 8) | NAND_CMD_ERASE1);
		set_addr(mtd, -1, flctl->erase1_page_addr);
		start_translation(flctl);
		wait_completion(flctl);
		break;

	case NAND_CMD_SEQIN:
		if (!flctl->page_size) {
			/* output read command */
			if (column >= mtd->writesize) {
				column -= mtd->writesize;
				read_cmd = NAND_CMD_READOOB;
			} else if (column < 256) {
				read_cmd = NAND_CMD_READ0;
			} else {
				column -= 256;
				read_cmd = NAND_CMD_READ1;
			}
		}
		flctl->seqin_column = column;
		flctl->seqin_page_addr = page_addr;
		flctl->seqin_read_cmd = read_cmd;
		break;

	case NAND_CMD_PAGEPROG:
		empty_fifo(flctl);
		if (!flctl->page_size) {
			set_cmd_regs(mtd, NAND_CMD_SEQIN,
					flctl->seqin_read_cmd);
			set_addr(mtd, -1, -1);
			writel(0, FLDTCNTR(flctl));	/* set 0 size */
			start_translation(flctl);
			wait_completion(flctl);
		}
		if (flctl->hwecc) {
			/* write page with hwecc */
			if (flctl->seqin_column == mtd->writesize)
				execmd_write_oob(mtd);
			else if (!flctl->seqin_column)
				execmd_write_page_sector(mtd);
			else
				pr_err("Invalid address!?\n");
			break;
		}
		set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
		set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
		writel(flctl->index, FLDTCNTR(flctl));	/* set write size */
		start_translation(flctl);
		write_fiforeg(flctl, flctl->index, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_STATUS:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		flctl->read_bytes = 1;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		start_translation(flctl);
		read_datareg(flctl, 0); /* read and end */
		break;

	case NAND_CMD_RESET:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		writel(0, FLDTCNTR(flctl));	/* set 0 size */
		start_translation(flctl);
		wait_completion(flctl);
		break;

	default:
		break;
	}
	goto runtime_exit;

read_normal_exit:
	writel(flctl->read_bytes, FLDTCNTR(flctl));	/* set read size */
	empty_fifo(flctl);
	start_translation(flctl);
	read_fiforeg(flctl, flctl->read_bytes, 0);
	wait_completion(flctl);
runtime_exit:
	pm_runtime_put_sync(&flctl->pdev->dev);
}

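/*
 * Chip (de)select. While the chip is selected a PM QoS resume-latency
 * request is held so that runtime PM cannot add latency mid-operation;
 * it is dropped again on deselect.
 */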
static void flctl_select_chip(struct nand_chip *chip, int chipnr)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
	int ret;

	switch (chipnr) {
	case -1:
		flctl->flcmncr_base &= ~CE0_ENABLE;

		pm_runtime_get_sync(&flctl->pdev->dev);
		writel(flctl->flcmncr_base, FLCMNCR(flctl));

		if (flctl->qos_request) {
			dev_pm_qos_remove_request(&flctl->pm_qos);
			flctl->qos_request = 0;
		}

		pm_runtime_put_sync(&flctl->pdev->dev);
		break;
	case 0:
		flctl->flcmncr_base |= CE0_ENABLE;

		if (!flctl->qos_request) {
			ret = dev_pm_qos_add_request(&flctl->pdev->dev,
							&flctl->pm_qos,
							DEV_PM_QOS_RESUME_LATENCY,
							100);
			if (ret < 0)
				dev_err(&flctl->pdev->dev,
					"PM QoS request failed: %d\n", ret);
			flctl->qos_request = 1;
		}

		if (flctl->holden) {
			pm_runtime_get_sync(&flctl->pdev->dev);
			writel(HOLDEN, FLHOLDCR(flctl));
			pm_runtime_put_sync(&flctl->pdev->dev);
		}
		break;
	default:
		BUG();
	}
}

static void flctl_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

	memcpy(&flctl->done_buff[flctl->index], buf, len);
	flctl->index += len;
}

static uint8_t flctl_read_byte(struct nand_chip *chip)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
	uint8_t data;

	data = flctl->done_buff[flctl->index];
	flctl->index++;
	return data;
}

static void flctl_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

	memcpy(buf, &flctl->done_buff[flctl->index], len);
	flctl->index += len;
}

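/*
 * Called once the flash has been identified: derive the page-size mode and
 * the address-cycle counts from the chip geometry, then hook up either the
 * hardware 4-symbol ECC or software Hamming ECC.
 */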
static int flctl_chip_attach_chip(struct nand_chip *chip)
{
	u64 targetsize = nanddev_target_size(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct sh_flctl *flctl = mtd_to_flctl(mtd);

	/*
	 * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
	 * Add the SEL_16BIT flag in flctl->flcmncr_base.
	 */
	if (chip->options & NAND_BUSWIDTH_16)
		flctl->flcmncr_base |= SEL_16BIT;

	if (mtd->writesize == 512) {
		flctl->page_size = 0;
		if (targetsize > (32 << 20)) {
			/* larger than 32MB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (targetsize > (2 << 16)) {
			/* larger than 128KB */
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_2;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	} else {
		flctl->page_size = 1;
		if (targetsize > (128 << 20)) {
			/* larger than 128MB */
			flctl->rw_ADRCNT = ADRCNT2_E;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (targetsize > (8 << 16)) {
			/* larger than 512KB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	}

	if (flctl->hwecc) {
		if (mtd->writesize == 512) {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
			chip->badblock_pattern = &flctl_4secc_smallpage;
		} else {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
			chip->badblock_pattern = &flctl_4secc_largepage;
		}

		chip->ecc.size = 512;
		chip->ecc.bytes = 10;
		chip->ecc.strength = 4;
		chip->ecc.read_page = flctl_read_page_hwecc;
		chip->ecc.write_page = flctl_write_page_hwecc;
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

		/* 4 symbols ECC enabled */
		flctl->flcmncr_base |= _4ECCEN;
	} else {
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
	}

	return 0;
}

static const struct nand_controller_ops flctl_nand_controller_ops = {
	.attach_chip = flctl_chip_attach_chip,
};

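/*
 * FLSTE error interrupt: log the FLINTDMACR state and write back the base
 * value to re-arm the interrupt (assumption: this also clears the latched
 * error condition).
 */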
static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
{
	struct sh_flctl *flctl = dev_id;

	dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));

	return IRQ_HANDLED;
}

struct flctl_soc_config {
	unsigned long flcmncr_val;
	unsigned has_hwecc:1;
	unsigned use_holden:1;
};

static struct flctl_soc_config flctl_sh7372_config = {
	.flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
	.has_hwecc = 1,
	.use_holden = 1,
};

static const struct of_device_id of_flctl_match[] = {
	{ .compatible = "renesas,shmobile-flctl-sh7372",
				.data = &flctl_sh7372_config },
	{},
};
MODULE_DEVICE_TABLE(of, of_flctl_match);

static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
	const struct flctl_soc_config *config;
	struct sh_flctl_platform_data *pdata;

	config = of_device_get_match_data(dev);
	if (!config) {
		dev_err(dev, "%s: no OF configuration attached\n", __func__);
		return NULL;
	}

	pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
								GFP_KERNEL);
	if (!pdata)
		return NULL;

	/* set SoC specific options */
	pdata->flcmncr_val = config->flcmncr_val;
	pdata->has_hwecc = config->has_hwecc;
	pdata->use_holden = config->use_holden;

	return pdata;
}

static int flctl_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct sh_flctl *flctl;
	struct mtd_info *flctl_mtd;
	struct nand_chip *nand;
	struct sh_flctl_platform_data *pdata;
	int ret;
	int irq;

	flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
	if (!flctl)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	flctl->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(flctl->reg))
		return PTR_ERR(flctl->reg);
	flctl->fifo = res->start + 0x24; /* FLDTFIFO */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
			       "flste", flctl);
	if (ret) {
		dev_err(&pdev->dev, "request interrupt failed.\n");
		return ret;
	}

	if (pdev->dev.of_node)
		pdata = flctl_parse_dt(&pdev->dev);
	else
		pdata = dev_get_platdata(&pdev->dev);

	if (!pdata) {
		dev_err(&pdev->dev, "no setup data defined\n");
		return -EINVAL;
	}

	platform_set_drvdata(pdev, flctl);
	nand = &flctl->chip;
	flctl_mtd = nand_to_mtd(nand);
	nand_set_flash_node(nand, pdev->dev.of_node);
	flctl_mtd->dev.parent = &pdev->dev;
	flctl->pdev = pdev;
	flctl->hwecc = pdata->has_hwecc;
	flctl->holden = pdata->use_holden;
	flctl->flcmncr_base = pdata->flcmncr_val;
	flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;

	/* 20 us command delay time */
	nand->legacy.chip_delay = 20;

	nand->legacy.read_byte = flctl_read_byte;
	nand->legacy.write_buf = flctl_write_buf;
	nand->legacy.read_buf = flctl_read_buf;
	nand->legacy.select_chip = flctl_select_chip;
	nand->legacy.cmdfunc = flctl_cmdfunc;
	nand->legacy.set_features = nand_get_set_features_notsupp;
	nand->legacy.get_features = nand_get_set_features_notsupp;

	if (pdata->flcmncr_val & SEL_16BIT)
		nand->options |= NAND_BUSWIDTH_16;

	nand->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	flctl_setup_dma(flctl);

	nand->legacy.dummy_controller.ops = &flctl_nand_controller_ops;
	ret = nand_scan(nand, 1);
	if (ret)
		goto err_chip;

	ret = mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
	if (ret)
		goto cleanup_nand;

	return 0;

cleanup_nand:
	nand_cleanup(nand);
err_chip:
	flctl_release_dma(flctl);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int flctl_remove(struct platform_device *pdev)
{
	struct sh_flctl *flctl = platform_get_drvdata(pdev);
	struct nand_chip *chip = &flctl->chip;
	int ret;

	flctl_release_dma(flctl);
	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static struct platform_driver flctl_driver = {
	.remove		= flctl_remove,
	.driver = {
		.name	= "sh_flctl",
		.of_match_table = of_match_ptr(of_flctl_match),
	},
};

module_platform_driver_probe(flctl_driver, flctl_probe);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("SuperH FLCTL driver");
MODULE_ALIAS("platform:sh_flctl");