// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Stefan Agner <stefan@agner.ch>
 * Copyright (C) 2014-2015 Lucas Stach <dev@lynxeye.de>
 * Copyright (C) 2012 Avionic Design GmbH
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#define COMMAND					0x00
#define   COMMAND_GO				BIT(31)
#define   COMMAND_CLE				BIT(30)
#define   COMMAND_ALE				BIT(29)
#define   COMMAND_PIO				BIT(28)
#define   COMMAND_TX				BIT(27)
#define   COMMAND_RX				BIT(26)
#define   COMMAND_SEC_CMD			BIT(25)
#define   COMMAND_AFT_DAT			BIT(24)
#define   COMMAND_TRANS_SIZE(size)		((((size) - 1) & 0xf) << 20)
#define   COMMAND_A_VALID			BIT(19)
#define   COMMAND_B_VALID			BIT(18)
#define   COMMAND_RD_STATUS_CHK			BIT(17)
#define   COMMAND_RBSY_CHK			BIT(16)
#define   COMMAND_CE(x)				BIT(8 + ((x) & 0x7))
#define   COMMAND_CLE_SIZE(size)		((((size) - 1) & 0x3) << 4)
#define   COMMAND_ALE_SIZE(size)		((((size) - 1) & 0xf) << 0)

#define STATUS					0x04

#define ISR					0x08
#define   ISR_CORRFAIL_ERR			BIT(24)
#define   ISR_UND				BIT(7)
#define   ISR_OVR				BIT(6)
#define   ISR_CMD_DONE				BIT(5)
#define   ISR_ECC_ERR				BIT(4)

#define IER					0x0c
#define   IER_ERR_TRIG_VAL(x)			(((x) & 0xf) << 16)
#define   IER_UND				BIT(7)
#define   IER_OVR				BIT(6)
#define   IER_CMD_DONE				BIT(5)
#define   IER_ECC_ERR				BIT(4)
#define   IER_GIE				BIT(0)

#define CONFIG					0x10
#define   CONFIG_HW_ECC				BIT(31)
#define   CONFIG_ECC_SEL			BIT(30)
#define   CONFIG_ERR_COR			BIT(29)
#define   CONFIG_PIPE_EN			BIT(28)
#define   CONFIG_TVAL_4				(0 << 24)
#define   CONFIG_TVAL_6				(1 << 24)
#define   CONFIG_TVAL_8				(2 << 24)
#define   CONFIG_SKIP_SPARE			BIT(23)
#define   CONFIG_BUS_WIDTH_16			BIT(21)
#define   CONFIG_COM_BSY			BIT(20)
#define   CONFIG_PS_256				(0 << 16)
#define   CONFIG_PS_512				(1 << 16)
#define   CONFIG_PS_1024			(2 << 16)
#define   CONFIG_PS_2048			(3 << 16)
#define   CONFIG_PS_4096			(4 << 16)
#define   CONFIG_SKIP_SPARE_SIZE_4		(0 << 14)
#define   CONFIG_SKIP_SPARE_SIZE_8		(1 << 14)
#define   CONFIG_SKIP_SPARE_SIZE_12		(2 << 14)
#define   CONFIG_SKIP_SPARE_SIZE_16		(3 << 14)
#define   CONFIG_TAG_BYTE_SIZE(x)			((x) & 0xff)

#define TIMING_1				0x14
#define   TIMING_TRP_RESP(x)			(((x) & 0xf) << 28)
#define   TIMING_TWB(x)				(((x) & 0xf) << 24)
#define   TIMING_TCR_TAR_TRR(x)			(((x) & 0xf) << 20)
#define   TIMING_TWHR(x)			(((x) & 0xf) << 16)
#define   TIMING_TCS(x)				(((x) & 0x3) << 14)
#define   TIMING_TWH(x)				(((x) & 0x3) << 12)
#define   TIMING_TWP(x)				(((x) & 0xf) <<  8)
#define   TIMING_TRH(x)				(((x) & 0x3) <<  4)
#define   TIMING_TRP(x)				(((x) & 0xf) <<  0)

#define RESP					0x18

#define TIMING_2				0x1c
#define   TIMING_TADL(x)			((x) & 0xf)

#define CMD_REG1				0x20
#define CMD_REG2				0x24
#define ADDR_REG1				0x28
#define ADDR_REG2				0x2c

#define DMA_MST_CTRL				0x30
#define   DMA_MST_CTRL_GO			BIT(31)
#define   DMA_MST_CTRL_IN			(0 << 30)
#define   DMA_MST_CTRL_OUT			BIT(30)
#define   DMA_MST_CTRL_PERF_EN			BIT(29)
#define   DMA_MST_CTRL_IE_DONE			BIT(28)
#define   DMA_MST_CTRL_REUSE			BIT(27)
#define   DMA_MST_CTRL_BURST_1			(2 << 24)
#define   DMA_MST_CTRL_BURST_4			(3 << 24)
#define   DMA_MST_CTRL_BURST_8			(4 << 24)
#define   DMA_MST_CTRL_BURST_16			(5 << 24)
#define   DMA_MST_CTRL_IS_DONE			BIT(20)
#define   DMA_MST_CTRL_EN_A			BIT(2)
#define   DMA_MST_CTRL_EN_B			BIT(1)

#define DMA_CFG_A				0x34
#define DMA_CFG_B				0x38

#define FIFO_CTRL				0x3c
#define   FIFO_CTRL_CLR_ALL			BIT(3)

#define DATA_PTR				0x40
#define TAG_PTR					0x44
#define ECC_PTR					0x48

#define DEC_STATUS				0x4c
#define   DEC_STATUS_A_ECC_FAIL			BIT(1)
#define   DEC_STATUS_ERR_COUNT_MASK		0x00ff0000
#define   DEC_STATUS_ERR_COUNT_SHIFT		16

#define HWSTATUS_CMD				0x50
#define HWSTATUS_MASK				0x54
#define   HWSTATUS_RDSTATUS_MASK(x)		(((x) & 0xff) << 24)
#define   HWSTATUS_RDSTATUS_VALUE(x)		(((x) & 0xff) << 16)
#define   HWSTATUS_RBSY_MASK(x)			(((x) & 0xff) << 8)
#define   HWSTATUS_RBSY_VALUE(x)		(((x) & 0xff) << 0)

#define BCH_CONFIG				0xcc
#define   BCH_ENABLE				BIT(0)
#define   BCH_TVAL_4				(0 << 4)
#define   BCH_TVAL_8				(1 << 4)
#define   BCH_TVAL_14				(2 << 4)
#define   BCH_TVAL_16				(3 << 4)

#define DEC_STAT_RESULT				0xd0
#define DEC_STAT_BUF				0xd4
#define   DEC_STAT_BUF_FAIL_SEC_FLAG_MASK	0xff000000
#define   DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT	24
#define   DEC_STAT_BUF_CORR_SEC_FLAG_MASK	0x00ff0000
#define   DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT	16
#define   DEC_STAT_BUF_MAX_CORR_CNT_MASK	0x00001f00
#define   DEC_STAT_BUF_MAX_CORR_CNT_SHIFT	8

#define OFFSET(val, off)	((val) < (off) ? 0 : (val) - (off))

#define SKIP_SPARE_BYTES	4
#define BITS_PER_STEP_RS	18
#define BITS_PER_STEP_BCH	13

#define INT_MASK		(IER_UND | IER_OVR | IER_CMD_DONE | IER_GIE)
#define HWSTATUS_CMD_DEFAULT	NAND_STATUS_READY
#define HWSTATUS_MASK_DEFAULT	(HWSTATUS_RDSTATUS_MASK(1) | \
				HWSTATUS_RDSTATUS_VALUE(0) | \
				HWSTATUS_RBSY_MASK(NAND_STATUS_READY) | \
				HWSTATUS_RBSY_VALUE(NAND_STATUS_READY))

struct tegra_nand_controller {
	struct nand_controller controller;
	struct device *dev;
	void __iomem *regs;
	int irq;
	struct clk *clk;
	struct completion command_complete;
	struct completion dma_complete;
	bool last_read_error;
	int cur_cs;
	struct nand_chip *chip;
};

struct tegra_nand_chip {
	struct nand_chip chip;
	struct gpio_desc *wp_gpio;
	struct mtd_oob_region ecc;
	u32 config;
	u32 config_ecc;
	u32 bch_config;
	int cs[1];
};

static inline struct tegra_nand_controller *
			to_tegra_ctrl(struct nand_controller *hw_ctrl)
{
	return container_of(hw_ctrl, struct tegra_nand_controller, controller);
}

static inline struct tegra_nand_chip *to_tegra_chip(struct nand_chip *chip)
{
	return container_of(chip, struct tegra_nand_chip, chip);
}

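/*
 * The Reed-Solomon ECC bytes live in the OOB area, starting right after
 * the SKIP_SPARE_BYTES the controller skips (typically holding the bad
 * block marker). No free OOB bytes are exposed to users, see
 * tegra_nand_ooblayout_no_free() below.
 */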
static int tegra_nand_ooblayout_rs_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_RS * chip->ecc.strength,
					  BITS_PER_BYTE);

	if (section > 0)
		return -ERANGE;

	oobregion->offset = SKIP_SPARE_BYTES;
	oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);

	return 0;
}

static int tegra_nand_ooblayout_no_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	return -ERANGE;
}

static const struct mtd_ooblayout_ops tegra_nand_oob_rs_ops = {
	.ecc = tegra_nand_ooblayout_rs_ecc,
	.free = tegra_nand_ooblayout_no_free,
};

static int tegra_nand_ooblayout_bch_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_BCH * chip->ecc.strength,
					  BITS_PER_BYTE);

	if (section > 0)
		return -ERANGE;

	oobregion->offset = SKIP_SPARE_BYTES;
	oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);

	return 0;
}

static const struct mtd_ooblayout_ops tegra_nand_oob_bch_ops = {
	.ecc = tegra_nand_ooblayout_bch_ecc,
	.free = tegra_nand_ooblayout_no_free,
};

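/*
 * Shared interrupt handler for command completion, DMA completion and
 * FIFO error events. Pending bits are cleared by writing the value just
 * read back to the respective status register.
 */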
static irqreturn_t tegra_nand_irq(int irq, void *data)
{
	struct tegra_nand_controller *ctrl = data;
	u32 isr, dma;

	isr = readl_relaxed(ctrl->regs + ISR);
	dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
	dev_dbg(ctrl->dev, "isr %08x\n", isr);

	if (!isr && !(dma & DMA_MST_CTRL_IS_DONE))
		return IRQ_NONE;

	/*
	 * The bit name is somewhat misleading: this is also set when
	 * HW ECC was successful. The data sheet states:
	 * Correctable OR Un-correctable errors occurred in the DMA transfer...
	 */
	if (isr & ISR_CORRFAIL_ERR)
		ctrl->last_read_error = true;

	if (isr & ISR_CMD_DONE)
		complete(&ctrl->command_complete);

	if (isr & ISR_UND)
		dev_err(ctrl->dev, "FIFO underrun\n");

	if (isr & ISR_OVR)
		dev_err(ctrl->dev, "FIFO overrun\n");

	/* handle DMA interrupts */
	if (dma & DMA_MST_CTRL_IS_DONE) {
		writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
		complete(&ctrl->dma_complete);
	}

	/* clear interrupts */
	writel_relaxed(isr, ctrl->regs + ISR);

	return IRQ_HANDLED;
}

static const char * const tegra_nand_reg_names[] = {
	"COMMAND",
	"STATUS",
	"ISR",
	"IER",
	"CONFIG",
	"TIMING",
	NULL,
	"TIMING2",
	"CMD_REG1",
	"CMD_REG2",
	"ADDR_REG1",
	"ADDR_REG2",
	"DMA_MST_CTRL",
	"DMA_CFG_A",
	"DMA_CFG_B",
	"FIFO_CTRL",
};

static void tegra_nand_dump_reg(struct tegra_nand_controller *ctrl)
{
	u32 reg;
	int i;

	dev_err(ctrl->dev, "Tegra NAND controller register dump\n");
	for (i = 0; i < ARRAY_SIZE(tegra_nand_reg_names); i++) {
		const char *reg_name = tegra_nand_reg_names[i];

		if (!reg_name)
			continue;

		reg = readl_relaxed(ctrl->regs + (i * 4));
		dev_err(ctrl->dev, "%s: 0x%08x\n", reg_name, reg);
	}
}

static void tegra_nand_controller_abort(struct tegra_nand_controller *ctrl)
{
	u32 isr, dma;

	disable_irq(ctrl->irq);

	/* Abort current command/DMA operation */
	writel_relaxed(0, ctrl->regs + DMA_MST_CTRL);
	writel_relaxed(0, ctrl->regs + COMMAND);

	/* clear interrupts */
	isr = readl_relaxed(ctrl->regs + ISR);
	writel_relaxed(isr, ctrl->regs + ISR);
	dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
	writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);

	reinit_completion(&ctrl->command_complete);
	reinit_completion(&ctrl->dma_complete);

	enable_irq(ctrl->irq);
}

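/*
 * Execute a parsed sub-operation in PIO mode: up to two command cycles,
 * up to eight address cycles and at most four bytes of data, which is
 * all the 32-bit RESP register can carry. Full pages go through the DMA
 * path in tegra_nand_page_xfer() instead.
 */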
static int tegra_nand_cmd(struct nand_chip *chip,
			  const struct nand_subop *subop)
{
	const struct nand_op_instr *instr;
	const struct nand_op_instr *instr_data_in = NULL;
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	unsigned int op_id, size = 0, offset = 0;
	bool first_cmd = true;
	u32 reg, cmd = 0;
	int ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		unsigned int naddrs, i;
		const u8 *addrs;
		u32 addr1 = 0, addr2 = 0;

		instr = &subop->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			if (first_cmd) {
				cmd |= COMMAND_CLE;
				writel_relaxed(instr->ctx.cmd.opcode,
					       ctrl->regs + CMD_REG1);
			} else {
				cmd |= COMMAND_SEC_CMD;
				writel_relaxed(instr->ctx.cmd.opcode,
					       ctrl->regs + CMD_REG2);
			}
			first_cmd = false;
			break;

		case NAND_OP_ADDR_INSTR:
			offset = nand_subop_get_addr_start_off(subop, op_id);
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];

			cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(naddrs);
			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				addr1 |= *addrs++ << (BITS_PER_BYTE * i);
			naddrs -= i;
			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				addr2 |= *addrs++ << (BITS_PER_BYTE * i);

			writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
			writel_relaxed(addr2, ctrl->regs + ADDR_REG2);
			break;

		case NAND_OP_DATA_IN_INSTR:
			size = nand_subop_get_data_len(subop, op_id);
			offset = nand_subop_get_data_start_off(subop, op_id);

			cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
				COMMAND_RX | COMMAND_A_VALID;

			instr_data_in = instr;
			break;

		case NAND_OP_DATA_OUT_INSTR:
			size = nand_subop_get_data_len(subop, op_id);
			offset = nand_subop_get_data_start_off(subop, op_id);

			cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
				COMMAND_TX | COMMAND_A_VALID;
			memcpy(&reg, instr->ctx.data.buf.out + offset, size);

			writel_relaxed(reg, ctrl->regs + RESP);
			break;

		case NAND_OP_WAITRDY_INSTR:
			cmd |= COMMAND_RBSY_CHK;
			break;
		}
	}

	cmd |= COMMAND_GO | COMMAND_CE(ctrl->cur_cs);
	writel_relaxed(cmd, ctrl->regs + COMMAND);
	ret = wait_for_completion_timeout(&ctrl->command_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "COMMAND timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		return -ETIMEDOUT;
	}

	if (instr_data_in) {
		reg = readl_relaxed(ctrl->regs + RESP);
		memcpy(instr_data_in->ctx.data.buf.in + offset, &reg, size);
	}

	return 0;
}

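/*
 * The op parser splits arbitrary NAND operations into sub-operations
 * matching one of these patterns: command/address/command/wait, a small
 * PIO data-out, or command/address/command/wait followed by a small PIO
 * data-in.
 */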
static const struct nand_op_parser tegra_nand_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 4)),
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4)),
	);

static void tegra_nand_select_target(struct nand_chip *chip,
				     unsigned int die_nr)
{
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);

	ctrl->cur_cs = nand->cs[die_nr];
}

static int tegra_nand_exec_op(struct nand_chip *chip,
			      const struct nand_operation *op,
			      bool check_only)
{
	if (!check_only)
		tegra_nand_select_target(chip, op->cs);

	return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
				      check_only);
}

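/*
 * Switch the controller between raw access and hardware ECC by loading
 * the precomputed CONFIG (and, for BCH, BCH_CONFIG) register values.
 */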
static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
			      struct nand_chip *chip, bool enable)
{
	struct tegra_nand_chip *nand = to_tegra_chip(chip);

	if (chip->ecc.algo == NAND_ECC_ALGO_BCH && enable)
		writel_relaxed(nand->bch_config, ctrl->regs + BCH_CONFIG);
	else
		writel_relaxed(0, ctrl->regs + BCH_CONFIG);

	if (enable)
		writel_relaxed(nand->config_ecc, ctrl->regs + CONFIG);
	else
		writel_relaxed(nand->config, ctrl->regs + CONFIG);
}

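/*
 * Transfer a full page and/or its OOB area via the DMA engine: buffer A
 * carries the main data, buffer B the tag/OOB bytes. Callers that want
 * hardware ECC must enable it through tegra_nand_hw_ecc() beforehand.
 */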
static int tegra_nand_page_xfer(struct mtd_info *mtd, struct nand_chip *chip,
				void *buf, void *oob_buf, int oob_len, int page,
				bool read)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	dma_addr_t dma_addr = 0, dma_addr_oob = 0;
	u32 addr1, cmd, dma_ctrl;
	int ret;

	tegra_nand_select_target(chip, chip->cur_cs);

	if (read) {
		writel_relaxed(NAND_CMD_READ0, ctrl->regs + CMD_REG1);
		writel_relaxed(NAND_CMD_READSTART, ctrl->regs + CMD_REG2);
	} else {
		writel_relaxed(NAND_CMD_SEQIN, ctrl->regs + CMD_REG1);
		writel_relaxed(NAND_CMD_PAGEPROG, ctrl->regs + CMD_REG2);
	}
	cmd = COMMAND_CLE | COMMAND_SEC_CMD;

	/* Lower 16-bits are column, by default 0 */
	addr1 = page << 16;

	if (!buf)
		addr1 |= mtd->writesize;
	writel_relaxed(addr1, ctrl->regs + ADDR_REG1);

	if (chip->options & NAND_ROW_ADDR_3) {
		writel_relaxed(page >> 16, ctrl->regs + ADDR_REG2);
		cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(5);
	} else {
		cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(4);
	}

	if (buf) {
		dma_addr = dma_map_single(ctrl->dev, buf, mtd->writesize, dir);
		ret = dma_mapping_error(ctrl->dev, dma_addr);
		if (ret) {
			dev_err(ctrl->dev, "dma mapping error\n");
			return -EINVAL;
		}

		writel_relaxed(mtd->writesize - 1, ctrl->regs + DMA_CFG_A);
		writel_relaxed(dma_addr, ctrl->regs + DATA_PTR);
	}

	if (oob_buf) {
		dma_addr_oob = dma_map_single(ctrl->dev, oob_buf, mtd->oobsize,
					      dir);
		ret = dma_mapping_error(ctrl->dev, dma_addr_oob);
		if (ret) {
			dev_err(ctrl->dev, "dma mapping error\n");
			ret = -EINVAL;
			goto err_unmap_dma_page;
		}

		writel_relaxed(oob_len - 1, ctrl->regs + DMA_CFG_B);
		writel_relaxed(dma_addr_oob, ctrl->regs + TAG_PTR);
	}

	dma_ctrl = DMA_MST_CTRL_GO | DMA_MST_CTRL_PERF_EN |
		   DMA_MST_CTRL_IE_DONE | DMA_MST_CTRL_IS_DONE |
		   DMA_MST_CTRL_BURST_16;

	if (buf)
		dma_ctrl |= DMA_MST_CTRL_EN_A;
	if (oob_buf)
		dma_ctrl |= DMA_MST_CTRL_EN_B;

	if (read)
		dma_ctrl |= DMA_MST_CTRL_IN | DMA_MST_CTRL_REUSE;
	else
		dma_ctrl |= DMA_MST_CTRL_OUT;

	writel_relaxed(dma_ctrl, ctrl->regs + DMA_MST_CTRL);

	cmd |= COMMAND_GO | COMMAND_RBSY_CHK | COMMAND_TRANS_SIZE(9) |
	       COMMAND_CE(ctrl->cur_cs);

	if (buf)
		cmd |= COMMAND_A_VALID;
	if (oob_buf)
		cmd |= COMMAND_B_VALID;

	if (read)
		cmd |= COMMAND_RX;
	else
		cmd |= COMMAND_TX | COMMAND_AFT_DAT;

	writel_relaxed(cmd, ctrl->regs + COMMAND);

	ret = wait_for_completion_timeout(&ctrl->command_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "COMMAND timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		ret = -ETIMEDOUT;
		goto err_unmap_dma;
	}

	ret = wait_for_completion_timeout(&ctrl->dma_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "DMA timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		ret = -ETIMEDOUT;
		goto err_unmap_dma;
	}
	ret = 0;

err_unmap_dma:
	if (oob_buf)
		dma_unmap_single(ctrl->dev, dma_addr_oob, mtd->oobsize, dir);
err_unmap_dma_page:
	if (buf)
		dma_unmap_single(ctrl->dev, dma_addr, mtd->writesize, dir);

	return ret;
}

static int tegra_nand_read_page_raw(struct nand_chip *chip, u8 *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;

	return tegra_nand_page_xfer(mtd, chip, buf, oob_buf,
				    mtd->oobsize, page, true);
}

static int tegra_nand_write_page_raw(struct nand_chip *chip, const u8 *buf,
				     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;

	return tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
				     mtd->oobsize, page, false);
}

static int tegra_nand_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
				    mtd->oobsize, page, true);
}

static int tegra_nand_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
				    mtd->oobsize, page, false);
}

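/*
 * Read a page with hardware ECC and translate the decode status into
 * MTD ECC statistics. Since the ECC engine flags erased pages as
 * uncorrectable, a software erased-page check is performed when all
 * sectors fail to correct.
 */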
static int tegra_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
				      int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;
	u32 dec_stat, max_corr_cnt;
	unsigned long fail_sec_flag;
	int ret;

	tegra_nand_hw_ecc(ctrl, chip, true);
	ret = tegra_nand_page_xfer(mtd, chip, buf, oob_buf, 0, page, true);
	tegra_nand_hw_ecc(ctrl, chip, false);
	if (ret)
		return ret;

	/* No correctable or un-correctable errors, page must have 0 bitflips */
	if (!ctrl->last_read_error)
		return 0;

	/*
	 * Correctable or un-correctable errors occurred. Use DEC_STAT_BUF,
	 * which contains information for all ECC selections.
	 *
	 * Note that since we do not use command queues, DEC_RESULT does not
	 * state the number of pages we can read from DEC_STAT_BUF. But since
	 * CORRFAIL_ERR did occur during the page read, we do have a valid
	 * result in DEC_STAT_BUF.
	 */
	ctrl->last_read_error = false;
	dec_stat = readl_relaxed(ctrl->regs + DEC_STAT_BUF);

	fail_sec_flag = (dec_stat & DEC_STAT_BUF_FAIL_SEC_FLAG_MASK) >>
			DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT;

	max_corr_cnt = (dec_stat & DEC_STAT_BUF_MAX_CORR_CNT_MASK) >>
		       DEC_STAT_BUF_MAX_CORR_CNT_SHIFT;

	if (fail_sec_flag) {
		int bit, max_bitflips = 0;

		/*
		 * Since we do not support subpage writes, a complete page
		 * is either written or not. We can take a shortcut here by
		 * checking whether any of the sectors has been read
		 * successfully: if at least one sector has been read
		 * successfully, the page must have been written previously.
		 * It cannot be an erased page.
		 *
		 * E.g. the controller might return a fail_sec_flag of 0x4,
		 * which means only the third sector failed to correct. The
		 * page must have been written, and the third sector really
		 * is not correctable anymore.
		 */
		if (fail_sec_flag ^ GENMASK(chip->ecc.steps - 1, 0)) {
			mtd->ecc_stats.failed += hweight8(fail_sec_flag);
			return max_corr_cnt;
		}

		/*
		 * All sectors failed to correct, but the ECC isn't smart
		 * enough to figure out if a page is really just erased.
		 * Read OOB data and check whether data/OOB is completely
		 * erased or if error correction just failed for all sub-
		 * pages.
		 */
		ret = tegra_nand_read_oob(chip, page);
		if (ret < 0)
			return ret;

		for_each_set_bit(bit, &fail_sec_flag, chip->ecc.steps) {
			u8 *data = buf + (chip->ecc.size * bit);
			u8 *oob = chip->oob_poi + nand->ecc.offset +
				  (chip->ecc.bytes * bit);

			ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
							  oob, chip->ecc.bytes,
							  NULL, 0,
							  chip->ecc.strength);
			if (ret < 0) {
				mtd->ecc_stats.failed++;
			} else {
				mtd->ecc_stats.corrected += ret;
				max_bitflips = max(ret, max_bitflips);
			}
		}

		return max_t(unsigned int, max_corr_cnt, max_bitflips);
	} else {
		int corr_sec_flag;

		corr_sec_flag = (dec_stat & DEC_STAT_BUF_CORR_SEC_FLAG_MASK) >>
				DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT;

		/*
		 * The value returned in the register is the maximum number
		 * of bitflips encountered in any single ECC region. As there
		 * is no way to get the per-region bitflip count, we cannot
		 * deliver exact stats; instead, we overestimate the number
		 * of corrected bitflips by assuming that every region with
		 * corrected errors encountered the maximum count.
		 */
		mtd->ecc_stats.corrected += max_corr_cnt * hweight8(corr_sec_flag);

		return max_corr_cnt;
	}
}

static int tegra_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;
	int ret;

	tegra_nand_hw_ecc(ctrl, chip, true);
	ret = tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
				   0, page, false);
	tegra_nand_hw_ecc(ctrl, chip, false);

	return ret;
}

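/*
 * Translate the SDR timing set into controller clock cycles. The
 * OFFSET() subtraction reflects a (presumed) implicit minimum number of
 * cycles the hardware applies to each timing field.
 */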
static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl,
				    const struct nand_sdr_timings *timings)
{
	/*
	 * The period (and all other timings in this function) is in ps,
	 * so we need to take care here to avoid integer overflows.
	 */
	unsigned int rate = clk_get_rate(ctrl->clk) / 1000000;
	unsigned int period = DIV_ROUND_UP(1000000, rate);
	u32 val, reg = 0;

	val = DIV_ROUND_UP(max3(timings->tAR_min, timings->tRR_min,
				timings->tRC_min), period);
	reg |= TIMING_TCR_TAR_TRR(OFFSET(val, 3));

	val = DIV_ROUND_UP(max(max(timings->tCS_min, timings->tCH_min),
			       max(timings->tALS_min, timings->tALH_min)),
			   period);
	reg |= TIMING_TCS(OFFSET(val, 2));

	val = DIV_ROUND_UP(max(timings->tRP_min, timings->tREA_max) + 6000,
			   period);
	reg |= TIMING_TRP(OFFSET(val, 1)) | TIMING_TRP_RESP(OFFSET(val, 1));

	reg |= TIMING_TWB(OFFSET(DIV_ROUND_UP(timings->tWB_max, period), 1));
	reg |= TIMING_TWHR(OFFSET(DIV_ROUND_UP(timings->tWHR_min, period), 1));
	reg |= TIMING_TWH(OFFSET(DIV_ROUND_UP(timings->tWH_min, period), 1));
	reg |= TIMING_TWP(OFFSET(DIV_ROUND_UP(timings->tWP_min, period), 1));
	reg |= TIMING_TRH(OFFSET(DIV_ROUND_UP(timings->tREH_min, period), 1));

	writel_relaxed(reg, ctrl->regs + TIMING_1);

	val = DIV_ROUND_UP(timings->tADL_min, period);
	reg = TIMING_TADL(OFFSET(val, 3));

	writel_relaxed(reg, ctrl->regs + TIMING_2);
}

static int tegra_nand_setup_interface(struct nand_chip *chip, int csline,
				      const struct nand_interface_config *conf)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	const struct nand_sdr_timings *timings;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	tegra_nand_setup_timing(ctrl, timings);

	return 0;
}

static const int rs_strength_bootable[] = { 4 };
static const int rs_strength[] = { 4, 6, 8 };
static const int bch_strength_bootable[] = { 8, 16 };
static const int bch_strength[] = { 4, 8, 14, 16 };

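/*
 * Pick the weakest strength that satisfies the chip's requirement and
 * still fits the OOB area, or the strongest one that fits when asked to
 * maximize ECC strength. Boot media are limited to a subset of
 * strengths, presumably those the boot ROM supports.
 */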
static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
				   int strength_len, int bits_per_step,
				   int oobsize)
{
	struct nand_device *base = mtd_to_nanddev(nand_to_mtd(chip));
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(base);
	bool maximize = base->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH;
	int i;

	/*
	 * Loop through the available strengths, iterating backwards when
	 * we are asked to maximize the (BCH) strength.
	 */
	for (i = 0; i < strength_len; i++) {
		int strength_sel, bytes_per_step, bytes_per_page;

		if (maximize) {
			strength_sel = strength[strength_len - i - 1];
		} else {
			strength_sel = strength[i];

			if (strength_sel < requirements->strength)
				continue;
		}

		bytes_per_step = DIV_ROUND_UP(bits_per_step * strength_sel,
					      BITS_PER_BYTE);
		bytes_per_page = round_up(bytes_per_step * chip->ecc.steps, 4);

		/* Check whether strength fits OOB */
		if (bytes_per_page < (oobsize - SKIP_SPARE_BYTES))
			return strength_sel;
	}

	return -EINVAL;
}

static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
{
	const int *strength;
	int strength_len, bits_per_step;

	switch (chip->ecc.algo) {
	case NAND_ECC_ALGO_RS:
		bits_per_step = BITS_PER_STEP_RS;
		if (chip->options & NAND_IS_BOOT_MEDIUM) {
			strength = rs_strength_bootable;
			strength_len = ARRAY_SIZE(rs_strength_bootable);
		} else {
			strength = rs_strength;
			strength_len = ARRAY_SIZE(rs_strength);
		}
		break;
	case NAND_ECC_ALGO_BCH:
		bits_per_step = BITS_PER_STEP_BCH;
		if (chip->options & NAND_IS_BOOT_MEDIUM) {
			strength = bch_strength_bootable;
			strength_len = ARRAY_SIZE(bch_strength_bootable);
		} else {
			strength = bch_strength;
			strength_len = ARRAY_SIZE(bch_strength);
		}
		break;
	default:
		return -EINVAL;
	}

	return tegra_nand_get_strength(chip, strength, strength_len,
				       bits_per_step, oobsize);
}

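/*
 * Called after chip identification: validate the ECC requirements
 * against the controller's capabilities and precompute the CONFIG and
 * BCH_CONFIG register values used for raw and ECC accesses.
 */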
static int tegra_nand_attach_chip(struct nand_chip *chip)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	int bits_per_step;
	int ret;

	if (chip->bbt_options & NAND_BBT_USE_FLASH)
		chip->bbt_options |= NAND_BBT_NO_OOB;

	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
	chip->ecc.size = 512;
	chip->ecc.steps = mtd->writesize / chip->ecc.size;
	if (requirements->step_size != 512) {
		dev_err(ctrl->dev, "Unsupported step size %d\n",
			requirements->step_size);
		return -EINVAL;
	}

	chip->ecc.read_page = tegra_nand_read_page_hwecc;
	chip->ecc.write_page = tegra_nand_write_page_hwecc;
	chip->ecc.read_page_raw = tegra_nand_read_page_raw;
	chip->ecc.write_page_raw = tegra_nand_write_page_raw;
	chip->ecc.read_oob = tegra_nand_read_oob;
	chip->ecc.write_oob = tegra_nand_write_oob;

	if (chip->options & NAND_BUSWIDTH_16)
		nand->config |= CONFIG_BUS_WIDTH_16;

	if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
		if (mtd->writesize < 2048)
			chip->ecc.algo = NAND_ECC_ALGO_RS;
		else
			chip->ecc.algo = NAND_ECC_ALGO_BCH;
	}

	if (chip->ecc.algo == NAND_ECC_ALGO_BCH && mtd->writesize < 2048) {
		dev_err(ctrl->dev, "BCH supports 2K or 4K page size only\n");
		return -EINVAL;
	}

	if (!chip->ecc.strength) {
		ret = tegra_nand_select_strength(chip, mtd->oobsize);
		if (ret < 0) {
			dev_err(ctrl->dev,
				"No valid strength found, minimum %d\n",
				requirements->strength);
			return ret;
		}

		chip->ecc.strength = ret;
	}

	nand->config_ecc = CONFIG_PIPE_EN | CONFIG_SKIP_SPARE |
			   CONFIG_SKIP_SPARE_SIZE_4;

	switch (chip->ecc.algo) {
	case NAND_ECC_ALGO_RS:
		bits_per_step = BITS_PER_STEP_RS * chip->ecc.strength;
		mtd_set_ooblayout(mtd, &tegra_nand_oob_rs_ops);
		nand->config_ecc |= CONFIG_HW_ECC | CONFIG_ECC_SEL |
				    CONFIG_ERR_COR;
		switch (chip->ecc.strength) {
		case 4:
			nand->config_ecc |= CONFIG_TVAL_4;
			break;
		case 6:
			nand->config_ecc |= CONFIG_TVAL_6;
			break;
		case 8:
			nand->config_ecc |= CONFIG_TVAL_8;
			break;
		default:
			dev_err(ctrl->dev, "ECC strength %d not supported\n",
				chip->ecc.strength);
			return -EINVAL;
		}
		break;
	case NAND_ECC_ALGO_BCH:
		bits_per_step = BITS_PER_STEP_BCH * chip->ecc.strength;
		mtd_set_ooblayout(mtd, &tegra_nand_oob_bch_ops);
		nand->bch_config = BCH_ENABLE;
		switch (chip->ecc.strength) {
		case 4:
			nand->bch_config |= BCH_TVAL_4;
			break;
		case 8:
			nand->bch_config |= BCH_TVAL_8;
			break;
		case 14:
			nand->bch_config |= BCH_TVAL_14;
			break;
		case 16:
			nand->bch_config |= BCH_TVAL_16;
			break;
		default:
			dev_err(ctrl->dev, "ECC strength %d not supported\n",
				chip->ecc.strength);
			return -EINVAL;
		}
		break;
	default:
		dev_err(ctrl->dev, "ECC algorithm not supported\n");
		return -EINVAL;
	}

	dev_info(ctrl->dev, "Using %s with strength %d per 512 byte step\n",
		 chip->ecc.algo == NAND_ECC_ALGO_BCH ? "BCH" : "RS",
		 chip->ecc.strength);

	chip->ecc.bytes = DIV_ROUND_UP(bits_per_step, BITS_PER_BYTE);

	switch (mtd->writesize) {
	case 256:
		nand->config |= CONFIG_PS_256;
		break;
	case 512:
		nand->config |= CONFIG_PS_512;
		break;
	case 1024:
		nand->config |= CONFIG_PS_1024;
		break;
	case 2048:
		nand->config |= CONFIG_PS_2048;
		break;
	case 4096:
		nand->config |= CONFIG_PS_4096;
		break;
	default:
		dev_err(ctrl->dev, "Unsupported writesize %d\n",
			mtd->writesize);
		return -ENODEV;
	}

	/* Store complete configuration for HW ECC in config_ecc */
	nand->config_ecc |= nand->config;

	/* Non-HW ECC read/writes complete OOB */
	nand->config |= CONFIG_TAG_BYTE_SIZE(mtd->oobsize - 1);
	writel_relaxed(nand->config, ctrl->regs + CONFIG);

	return 0;
}

static const struct nand_controller_ops tegra_nand_controller_ops = {
	.attach_chip = &tegra_nand_attach_chip,
	.exec_op = tegra_nand_exec_op,
	.setup_interface = tegra_nand_setup_interface,
};

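/*
 * Parse the NAND chip subnode from the device tree (only a single chip
 * on a single chip-select is supported), scan it and register the
 * resulting MTD device.
 */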
static int tegra_nand_chips_init(struct device *dev,
				 struct tegra_nand_controller *ctrl)
{
	struct device_node *np = dev->of_node;
	struct device_node *np_nand;
	int nsels, nchips = of_get_child_count(np);
	struct tegra_nand_chip *nand;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int ret;
	u32 cs;

	if (nchips != 1) {
		dev_err(dev, "Currently only one NAND chip supported\n");
		return -EINVAL;
	}

	np_nand = of_get_next_child(np, NULL);

	nsels = of_property_count_elems_of_size(np_nand, "reg", sizeof(u32));
	if (nsels != 1) {
		dev_err(dev, "Missing/invalid reg property\n");
		return -EINVAL;
	}

	/* Retrieve CS id, currently only single die NAND supported */
	ret = of_property_read_u32(np_nand, "reg", &cs);
	if (ret) {
		dev_err(dev, "could not retrieve reg property: %d\n", ret);
		return ret;
	}

	nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
	if (!nand)
		return -ENOMEM;

	nand->cs[0] = cs;

	nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);

	if (IS_ERR(nand->wp_gpio)) {
		ret = PTR_ERR(nand->wp_gpio);
		dev_err(dev, "Failed to request WP GPIO: %d\n", ret);
		return ret;
	}

	chip = &nand->chip;
	chip->controller = &ctrl->controller;

	mtd = nand_to_mtd(chip);

	mtd->dev.parent = dev;
	mtd->owner = THIS_MODULE;

	nand_set_flash_node(chip, np_nand);

	if (!mtd->name)
		mtd->name = "tegra_nand";

	chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA;

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	mtd_ooblayout_ecc(mtd, 0, &nand->ecc);

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(dev, "Failed to register mtd device: %d\n", ret);
		nand_cleanup(chip);
		return ret;
	}

	ctrl->chip = chip;

	return 0;
}

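/*
 * Controller bring-up: map registers, enable the controller clock,
 * reset the module, program the default status-polling masks and
 * interrupt mask, then install the interrupt handler before scanning
 * for chips.
 */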
static int tegra_nand_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	struct tegra_nand_controller *ctrl;
	struct resource *res;
	int err = 0;

	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	ctrl->dev = &pdev->dev;
	nand_controller_init(&ctrl->controller);
	ctrl->controller.ops = &tegra_nand_controller_ops;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctrl->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ctrl->regs))
		return PTR_ERR(ctrl->regs);

	rst = devm_reset_control_get(&pdev->dev, "nand");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ctrl->clk = devm_clk_get(&pdev->dev, "nand");
	if (IS_ERR(ctrl->clk))
		return PTR_ERR(ctrl->clk);

	err = clk_prepare_enable(ctrl->clk);
	if (err)
		return err;

	err = reset_control_reset(rst);
	if (err) {
		dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
		goto err_disable_clk;
	}

	writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
	writel_relaxed(HWSTATUS_MASK_DEFAULT, ctrl->regs + HWSTATUS_MASK);
	writel_relaxed(INT_MASK, ctrl->regs + IER);

	init_completion(&ctrl->command_complete);
	init_completion(&ctrl->dma_complete);

	ctrl->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
			       dev_name(&pdev->dev), ctrl);
	if (err) {
		dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
		goto err_disable_clk;
	}

	writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);

	err = tegra_nand_chips_init(ctrl->dev, ctrl);
	if (err)
		goto err_disable_clk;

	platform_set_drvdata(pdev, ctrl);

	return 0;

err_disable_clk:
	clk_disable_unprepare(ctrl->clk);
	return err;
}

static int tegra_nand_remove(struct platform_device *pdev)
{
	struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
	struct nand_chip *chip = ctrl->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	nand_cleanup(chip);

	clk_disable_unprepare(ctrl->clk);

	return 0;
}

static const struct of_device_id tegra_nand_of_match[] = {
	{ .compatible = "nvidia,tegra20-nand" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra_nand_of_match);

static struct platform_driver tegra_nand_driver = {
	.driver = {
		.name = "tegra-nand",
		.of_match_table = tegra_nand_of_match,
	},
	.probe = tegra_nand_probe,
	.remove = tegra_nand_remove,
};
module_platform_driver(tegra_nand_driver);

MODULE_DESCRIPTION("NVIDIA Tegra NAND driver");
MODULE_AUTHOR("Thierry Reding <thierry.reding@nvidia.com>");
MODULE_AUTHOR("Lucas Stach <dev@lynxeye.de>");
MODULE_AUTHOR("Stefan Agner <stefan@agner.ch>");
MODULE_LICENSE("GPL v2");