// SPDX-License-Identifier: GPL-2.0
/*
 * Marvell NAND flash controller driver
 *
 * Copyright (C) 2017 Marvell
 * Author: Miquel RAYNAL <miquel.raynal@free-electrons.com>
 *
 *
 * This NAND controller driver handles two versions of the hardware,
 * one is called NFCv1 and is available on PXA SoCs and the other is
 * called NFCv2 and is available on Armada SoCs.
 *
 * The main visible difference is that NFCv1 only has Hamming ECC
 * capabilities, while NFCv2 also embeds a BCH ECC engine. Also, DMA
 * is not used with NFCv2.
 *
 * The ECC layouts are depicted in detail in Marvell AN-379, but here
 * is a brief description.
 *
 * When using Hamming, the data is split into 512B chunks (either 1, 2
 * or 4) and each chunk will have its own ECC "digest" of 6B at the
 * beginning of the OOB area, possibly followed by the remaining free
 * OOB bytes (also called "spare" bytes in the driver). This engine
 * corrects up to 1 bit per chunk and reliably detects an error if
 * there are at most 2 bitflips. Here is the page layout used by the
 * controller when Hamming is chosen:
 *
 * +--------------------------------------------------------------+
 * | Data 1 | ... | Data N | ECC 1 | ... | ECC N | Free OOB bytes |
 * +--------------------------------------------------------------+
 *
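 * For example, a 2kiB page is handled as four 512B chunks, giving
 * 4 x 6B = 24B of ECC at the beginning of the OOB area (this matches
 * the 2048-byte page/1-bit strength line of the marvell_nfc_layouts
 * table below).
 *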
 * When using the BCH engine, there are N identical (data + free OOB +
 * ECC) sections and potentially an extra one to deal with
 * configurations where the chosen (data + free OOB + ECC) sizes do
 * not align with the page (data + OOB) size. ECC bytes are always
 * 30B per ECC chunk. Here is the page layout used by the controller
 * when BCH is chosen:
 *
 * +-----------------------------------------
 * | Data 1 | Free OOB bytes 1 | ECC 1 | ...
 * +-----------------------------------------
 *
 *      -------------------------------------------
 *       ... | Data N | Free OOB bytes N | ECC N |
 *      -------------------------------------------
 *
 *           --------------------------------------------+
 *            Last Data | Last Free OOB bytes | Last ECC |
 *           --------------------------------------------+
 *
 * In both cases, the layout seen by the user is always: all data
 * first, then all free OOB bytes and finally all ECC bytes. With BCH,
 * ECC bytes are 30B long and are padded with 0xFF to align on 32
 * bytes.
 *
 * The controller has certain limitations that are handled by the
 * driver:
 *   - It can only read 2k at a time. To overcome this limitation, the
 *     driver issues data cycles on the bus, without issuing new
 *     CMD + ADDR cycles. The Marvell term is "naked" operations.
 *   - The ECC strength in BCH mode cannot be tuned. It is fixed at 16
 *     bits. What can be tuned is the ECC block size as long as it
 *     stays between 512B and 2kiB. It's usually chosen based on the
 *     chip ECC requirements. For instance, using 2kiB ECC chunks
 *     provides 4b/512B correctability.
 *   - The controller will always treat data bytes, free OOB bytes
 *     and ECC bytes in that order, no matter what the real layout is
 *     (which is usually all data then all OOB bytes). The
 *     marvell_nfc_layouts array below contains the currently
 *     supported layouts.
 *   - Because of these weird layouts, the Bad Block Markers can be
 *     located in the data section. In this case, the NAND_BBT_NO_OOB_BBM
 *     option must be set to prevent scanning/writing bad block
 *     markers.
 */

#include <linux/module.h>
#include <linux/clk.h>
#include <linux/mtd/rawnand.h>
#include <linux/of_platform.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <asm/unaligned.h>

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/pxa-dma.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>

/* Data FIFO granularity, FIFO reads/writes must be a multiple of this length */
#define FIFO_DEPTH		8
#define FIFO_REP(x)		(x / sizeof(u32))
#define BCH_SEQ_READS		(32 / FIFO_DEPTH)
/* NFC does not support transfers of larger chunks at a time */
#define MAX_CHUNK_SIZE		2112
/* NFCv1 cannot read more than 7 bytes of ID */
#define NFCV1_READID_LEN	7
/* Polling is done at a pace of POLL_PERIOD us until POLL_TIMEOUT is reached */
#define POLL_PERIOD		0
#define POLL_TIMEOUT		100000
/* Interrupt maximum wait period in ms */
#define IRQ_TIMEOUT		1000
/* Latency in clock cycles between SoC pins and NFC logic */
#define MIN_RD_DEL_CNT		3
/* Maximum number of contiguous address cycles */
#define MAX_ADDRESS_CYC_NFCV1	5
#define MAX_ADDRESS_CYC_NFCV2	7
/* System control registers/bits to enable the NAND controller on some SoCs */
#define GENCONF_SOC_DEVICE_MUX	0x208
#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
#define GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST BIT(20)
#define GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST BIT(21)
#define GENCONF_SOC_DEVICE_MUX_NFC_INT_EN BIT(25)
#define GENCONF_CLK_GATING_CTRL	0x220
#define GENCONF_CLK_GATING_CTRL_ND_GATE BIT(2)
#define GENCONF_ND_CLK_CTRL	0x700
#define GENCONF_ND_CLK_CTRL_EN	BIT(0)
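
/*
 * A minimal sketch (illustrative, not the driver's actual probe path) of how
 * the GENCONF bits above are meant to be used through a syscon regmap to
 * enable the controller; the "sysctrl" handle is an assumption:
 *
 *	u32 reg;
 *
 *	regmap_read(sysctrl, GENCONF_SOC_DEVICE_MUX, &reg);
 *	reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
 *	regmap_write(sysctrl, GENCONF_SOC_DEVICE_MUX, reg);
 */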

/* NAND controller data flash control register */
#define NDCR			0x00
#define NDCR_ALL_INT		GENMASK(11, 0)
#define NDCR_CS1_CMDDM		BIT(7)
#define NDCR_CS0_CMDDM		BIT(8)
#define NDCR_RDYM		BIT(11)
#define NDCR_ND_ARB_EN		BIT(12)
#define NDCR_RA_START		BIT(15)
#define NDCR_RD_ID_CNT(x)	(min_t(unsigned int, x, 0x7) << 16)
#define NDCR_PAGE_SZ(x)		(x >= 2048 ? BIT(24) : 0)
#define NDCR_DWIDTH_M		BIT(26)
#define NDCR_DWIDTH_C		BIT(27)
#define NDCR_ND_RUN		BIT(28)
#define NDCR_DMA_EN		BIT(29)
#define NDCR_ECC_EN		BIT(30)
#define NDCR_SPARE_EN		BIT(31)
#define NDCR_GENERIC_FIELDS_MASK (~(NDCR_RA_START | NDCR_PAGE_SZ(2048) | \
				    NDCR_DWIDTH_M | NDCR_DWIDTH_C))

/* NAND interface timing parameter 0 register */
#define NDTR0			0x04
#define NDTR0_TRP(x)		((min_t(unsigned int, x, 0xF) & 0x7) << 0)
#define NDTR0_TRH(x)		(min_t(unsigned int, x, 0x7) << 3)
#define NDTR0_ETRP(x)		((min_t(unsigned int, x, 0xF) & 0x8) << 3)
#define NDTR0_SEL_NRE_EDGE	BIT(7)
#define NDTR0_TWP(x)		(min_t(unsigned int, x, 0x7) << 8)
#define NDTR0_TWH(x)		(min_t(unsigned int, x, 0x7) << 11)
#define NDTR0_TCS(x)		(min_t(unsigned int, x, 0x7) << 16)
#define NDTR0_TCH(x)		(min_t(unsigned int, x, 0x7) << 19)
#define NDTR0_RD_CNT_DEL(x)	(min_t(unsigned int, x, 0xF) << 22)
#define NDTR0_SELCNTR		BIT(26)
#define NDTR0_TADL(x)		(min_t(unsigned int, x, 0x1F) << 27)

/* NAND interface timing parameter 1 register */
#define NDTR1			0x0C
#define NDTR1_TAR(x)		(min_t(unsigned int, x, 0xF) << 0)
#define NDTR1_TWHR(x)		(min_t(unsigned int, x, 0xF) << 4)
#define NDTR1_TRHW(x)		(min_t(unsigned int, x / 16, 0x3) << 8)
#define NDTR1_PRESCALE		BIT(14)
#define NDTR1_WAIT_MODE		BIT(15)
#define NDTR1_TR(x)		(min_t(unsigned int, x, 0xFFFF) << 16)

/* NAND controller status register */
#define NDSR			0x14
#define NDSR_WRCMDREQ		BIT(0)
#define NDSR_RDDREQ		BIT(1)
#define NDSR_WRDREQ		BIT(2)
#define NDSR_CORERR		BIT(3)
#define NDSR_UNCERR		BIT(4)
#define NDSR_CMDD(cs)		BIT(8 - cs)
#define NDSR_RDY(rb)		BIT(11 + rb)
#define NDSR_ERRCNT(x)		((x >> 16) & 0x1F)

/* NAND ECC control register */
#define NDECCCTRL		0x28
#define NDECCCTRL_BCH_EN	BIT(0)

/* NAND controller data buffer register */
#define NDDB			0x40

/* NAND controller command buffer 0 register */
#define NDCB0			0x48
#define NDCB0_CMD1(x)		((x & 0xFF) << 0)
#define NDCB0_CMD2(x)		((x & 0xFF) << 8)
#define NDCB0_ADDR_CYC(x)	((x & 0x7) << 16)
#define NDCB0_ADDR_GET_NUM_CYC(x) (((x) >> 16) & 0x7)
#define NDCB0_DBC		BIT(19)
#define NDCB0_CMD_TYPE(x)	((x & 0x7) << 21)
#define NDCB0_CSEL		BIT(24)
#define NDCB0_RDY_BYP		BIT(27)
#define NDCB0_LEN_OVRD		BIT(28)
#define NDCB0_CMD_XTYPE(x)	((x & 0x7) << 29)

/* NAND controller command buffer 1 register */
#define NDCB1			0x4C
#define NDCB1_COLS(x)		((x & 0xFFFF) << 0)
#define NDCB1_ADDRS_PAGE(x)	(x << 16)

/* NAND controller command buffer 2 register */
#define NDCB2			0x50
#define NDCB2_ADDR5_PAGE(x)	(((x >> 16) & 0xFF) << 0)
#define NDCB2_ADDR5_CYC(x)	((x & 0xFF) << 0)

/* NAND controller command buffer 3 register */
#define NDCB3			0x54
#define NDCB3_ADDR6_CYC(x)	((x & 0xFF) << 16)
#define NDCB3_ADDR7_CYC(x)	((x & 0xFF) << 24)

/* NAND controller command buffer 0 register 'type' and 'xtype' fields */
#define TYPE_READ		0
#define TYPE_WRITE		1
#define TYPE_ERASE		2
#define TYPE_READ_ID		3
#define TYPE_STATUS		4
#define TYPE_RESET		5
#define TYPE_NAKED_CMD		6
#define TYPE_NAKED_ADDR		7
#define TYPE_MASK		7
#define XTYPE_MONOLITHIC_RW	0
#define XTYPE_LAST_NAKED_RW	1
#define XTYPE_FINAL_COMMAND	3
#define XTYPE_READ		4
#define XTYPE_WRITE_DISPATCH	4
#define XTYPE_NAKED_RW		5
#define XTYPE_COMMAND_DISPATCH	6
#define XTYPE_MASK		7

/**
 * struct marvell_hw_ecc_layout - layout of Marvell ECC
 *
 * The Marvell ECC engine works differently than the others: in order to limit
 * the size of the IP, hardware engineers chose to set a fixed strength of 16
 * bits per subpage, and depending on the desired strength needed by the NAND
 * chip, a particular layout mixing data/spare/ecc is defined, with a possible
 * last chunk smaller than the others.
 *
 * @writesize:		Full page size on which the layout applies
 * @chunk:		Desired ECC chunk size on which the layout applies
 * @strength:		Desired ECC strength (per chunk size bytes) on which the
 *			layout applies
 * @nchunks:		Total number of chunks
 * @full_chunk_cnt:	Number of full-sized chunks, which is the number of
 *			repetitions of the pattern:
 *			(data_bytes + spare_bytes + ecc_bytes).
 * @data_bytes:		Number of data bytes per chunk
 * @spare_bytes:	Number of spare bytes per chunk
 * @ecc_bytes:		Number of ecc bytes per chunk
 * @last_data_bytes:	Number of data bytes in the last chunk
 * @last_spare_bytes:	Number of spare bytes in the last chunk
 * @last_ecc_bytes:	Number of ecc bytes in the last chunk
 */
struct marvell_hw_ecc_layout {
	/* Constraints */
	int writesize;
	int chunk;
	int strength;
	/* Corresponding layout */
	int nchunks;
	int full_chunk_cnt;
	int data_bytes;
	int spare_bytes;
	int ecc_bytes;
	int last_data_bytes;
	int last_spare_bytes;
	int last_ecc_bytes;
};

#define MARVELL_LAYOUT(ws, dc, ds, nc, fcc, db, sb, eb, ldb, lsb, leb)	\
	{								\
		.writesize = ws,					\
		.chunk = dc,						\
		.strength = ds,						\
		.nchunks = nc,						\
		.full_chunk_cnt = fcc,					\
		.data_bytes = db,					\
		.spare_bytes = sb,					\
		.ecc_bytes = eb,					\
		.last_data_bytes = ldb,					\
		.last_spare_bytes = lsb,				\
		.last_ecc_bytes = leb,					\
	}

/* Layouts explained in AN-379_Marvell_SoC_NFC_ECC */
static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
	MARVELL_LAYOUT(  512,   512,  1,  1,  1,  512,  8,  8,  0,  0,  0),
	MARVELL_LAYOUT( 2048,   512,  1,  1,  1, 2048, 40, 24,  0,  0,  0),
	MARVELL_LAYOUT( 2048,   512,  4,  1,  1, 2048, 32, 30,  0,  0,  0),
	MARVELL_LAYOUT( 2048,   512,  8,  2,  1, 1024,  0, 30, 1024, 32, 30),
	MARVELL_LAYOUT( 4096,   512,  4,  2,  2, 2048, 32, 30,  0,  0,  0),
	MARVELL_LAYOUT( 4096,   512,  8,  5,  4, 1024,  0, 30,  0, 64, 30),
	MARVELL_LAYOUT( 8192,   512,  4,  4,  4, 2048,  0, 30,  0,  0,  0),
	MARVELL_LAYOUT( 8192,   512,  8,  9,  8, 1024,  0, 30,  0, 160, 30),
};
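
/*
 * Reading the table above: each line must add up to at most the page size
 * plus the OOB size of the matching chip. For instance, the 4096B/4b line
 * describes two full chunks of 2048B data + 32B spare + 30B ECC, i.e.
 * 2 * (2048 + 32 + 30) = 4220 bytes consumed out of the page and its OOB
 * area. A sketch of that arithmetic as an (illustrative, unused) helper:
 *
 *	static int marvell_layout_used_sz(const struct marvell_hw_ecc_layout *lt)
 *	{
 *		return lt->full_chunk_cnt * (lt->data_bytes + lt->spare_bytes +
 *					     lt->ecc_bytes) +
 *		       (lt->nchunks - lt->full_chunk_cnt) *
 *		       (lt->last_data_bytes + lt->last_spare_bytes +
 *			lt->last_ecc_bytes);
 *	}
 */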

/**
 * struct marvell_nand_chip_sel - CS line description
 *
 * The NAND Flash Controller has up to 4 CE and 2 RB pins. The CE selection
 * is made by a field in the NDCB0 register, and by another field in the NDCB2
 * register. The datasheet describes the logic with an error: the ADDR5 field
 * is once declared at the beginning of NDCB2, and another time at its end.
 * Because the ADDR5 field of NDCB2 may be used by other bytes, it would be
 * more logical to use the last bits of this field instead of the first ones.
 *
 * @cs:			Wanted CE lane.
 * @ndcb0_csel:		Value of the NDCB0 register with or without the flag
 *			selecting the wanted CE lane. This is set once when
 *			the Device Tree is probed.
 * @rb:			Ready/Busy pin for the flash chip
 */
struct marvell_nand_chip_sel {
	unsigned int cs;
	u32 ndcb0_csel;
	unsigned int rb;
};

/**
 * struct marvell_nand_chip - stores NAND chip device related information
 *
 * @chip:		Base NAND chip structure
 * @node:		Used to store NAND chips into a list
 * @layout:		NAND layout when using hardware ECC
 * @ndcr:		Controller register value for this NAND chip
 * @ndtr0:		Timing registers 0 value for this NAND chip
 * @ndtr1:		Timing registers 1 value for this NAND chip
 * @addr_cyc:		Number of cycles needed to pass the column address
 * @selected_die:	Current active CS
 * @nsels:		Number of CS lines required by the NAND chip
 * @sels:		Array of CS lines descriptions
 */
struct marvell_nand_chip {
	struct nand_chip chip;
	struct list_head node;
	const struct marvell_hw_ecc_layout *layout;
	u32 ndcr;
	u32 ndtr0;
	u32 ndtr1;
	int addr_cyc;
	int selected_die;
	unsigned int nsels;
	struct marvell_nand_chip_sel sels[];
};

static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
{
	return container_of(chip, struct marvell_nand_chip, chip);
}

static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
							*nand)
{
	return &nand->sels[nand->selected_die];
}

/**
 * struct marvell_nfc_caps - NAND controller capabilities for distinction
 *                           between compatible strings
 *
 * @max_cs_nb:		Number of Chip Select lines available
 * @max_rb_nb:		Number of Ready/Busy lines available
 * @need_system_controller: Indicates if the SoC needs to have access to the
 *                      system controller (i.e. to enable the NAND controller)
 * @legacy_of_bindings:	Indicates if DT parsing must be done the old-fashioned
 *			way
 * @is_nfcv2:		NFCv2 has numerous enhancements compared to NFCv1, e.g.
 *			the BCH error detection and correction algorithm and
 *			the additional NDCB3 register
 * @use_dma:		Use DMA for data transfers
 */
struct marvell_nfc_caps {
	unsigned int max_cs_nb;
	unsigned int max_rb_nb;
	bool need_system_controller;
	bool legacy_of_bindings;
	bool is_nfcv2;
	bool use_dma;
};

/**
 * struct marvell_nfc - stores Marvell NAND controller information
 *
 * @controller:		Base controller structure
 * @dev:		Parent device (used to print error messages)
 * @regs:		NAND controller registers
 * @core_clk:		Core clock
 * @reg_clk:		Registers clock
 * @complete:		Completion object to wait for NAND controller events
 * @assigned_cs:	Bitmask describing already assigned CS lines
 * @chips:		List containing all the NAND chips attached to
 *			this NAND controller
 * @selected_chip:	Currently selected target chip
 * @caps:		NAND controller capabilities for each compatible string
 * @use_dma:		Whether DMA is used
 * @dma_chan:		DMA channel (NFCv1 only)
 * @dma_buf:		32-bit aligned buffer for DMA transfers (NFCv1 only)
 */
struct marvell_nfc {
	struct nand_controller controller;
	struct device *dev;
	void __iomem *regs;
	struct clk *core_clk;
	struct clk *reg_clk;
	struct completion complete;
	unsigned long assigned_cs;
	struct list_head chips;
	struct nand_chip *selected_chip;
	const struct marvell_nfc_caps *caps;

	/* DMA (NFCv1 only) */
	bool use_dma;
	struct dma_chan *dma_chan;
	u8 *dma_buf;
};

static inline struct marvell_nfc *to_marvell_nfc(struct nand_controller *ctrl)
{
	return container_of(ctrl, struct marvell_nfc, controller);
}

/**
 * struct marvell_nfc_timings - NAND controller timings expressed in NAND
 *                              Controller clock cycles
 *
 * @tRP:		ND_nRE pulse width
 * @tRH:		ND_nRE high duration
 * @tWP:		ND_nWE pulse time
 * @tWH:		ND_nWE high duration
 * @tCS:		Enable signal setup time
 * @tCH:		Enable signal hold time
 * @tADL:		Address to write data delay
 * @tAR:		ND_ALE low to ND_nRE low delay
 * @tWHR:		ND_nWE high to ND_nRE low for status read
 * @tRHW:		ND_nRE high duration, read to write delay
 * @tR:			ND_nWE high to ND_nRE low for read
 */
struct marvell_nfc_timings {
	/* NDTR0 fields */
	unsigned int tRP;
	unsigned int tRH;
	unsigned int tWP;
	unsigned int tWH;
	unsigned int tCS;
	unsigned int tCH;
	unsigned int tADL;
	/* NDTR1 fields */
	unsigned int tAR;
	unsigned int tWHR;
	unsigned int tRHW;
	unsigned int tR;
};
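
/*
 * These fields are meant to be packed into the NDTR0/NDTR1 registers with the
 * accessors defined earlier. A minimal sketch (illustrative only, bounds and
 * special cases elided; "t" is a hypothetical filled-in structure):
 *
 *	u32 ndtr0 = NDTR0_TRP(t->tRP) | NDTR0_TRH(t->tRH) |
 *		    NDTR0_TWP(t->tWP) | NDTR0_TWH(t->tWH) |
 *		    NDTR0_TCS(t->tCS) | NDTR0_TCH(t->tCH) |
 *		    NDTR0_TADL(t->tADL);
 *	u32 ndtr1 = NDTR1_TAR(t->tAR) | NDTR1_TWHR(t->tWHR) |
 *		    NDTR1_TRHW(t->tRHW) | NDTR1_TR(t->tR);
 */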

/**
 * Derive a duration in number of clock cycles.
 *
 * @ps: Duration in pico-seconds
 * @period_ns:  Clock period in nano-seconds
 *
 * Convert the duration into nano-seconds, then divide by the period and
 * return the number of clock periods.
 */
#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP(ps / 1000, period_ns))
#define TO_CYCLES64(ps, period_ns) (DIV_ROUND_UP_ULL(div_u64(ps, 1000), \
						     period_ns))
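
/*
 * Worked example: with a 100MHz core clock (period_ns = 10), a 25ns (25000ps)
 * constraint gives TO_CYCLES(25000, 10) = DIV_ROUND_UP(25, 10) = 3 cycles.
 */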

/**
 * struct marvell_nfc_op - filled during the parsing of the ->exec_op()
 *                         subop subset of instructions.
 *
 * @ndcb:		Array of values written to NDCBx registers
 * @cle_ale_delay_ns:	Optional delay after the last CMD or ADDR cycle
 * @rdy_timeout_ms:	Timeout for waits on Ready/Busy pin
 * @rdy_delay_ns:	Optional delay after waiting for the RB pin
 * @data_delay_ns:	Optional delay after the data xfer
 * @data_instr_idx:	Index of the data instruction in the subop
 * @data_instr:		Pointer to the data instruction in the subop
 */
struct marvell_nfc_op {
	u32 ndcb[4];
	unsigned int cle_ale_delay_ns;
	unsigned int rdy_timeout_ms;
	unsigned int rdy_delay_ns;
	unsigned int data_delay_ns;
	unsigned int data_instr_idx;
	const struct nand_op_instr *data_instr;
};

/*
 * Internal helper to conditionally apply a delay (from the above structure,
 * most of the time).
 */
static void cond_delay(unsigned int ns)
{
	if (!ns)
		return;

	if (ns < 10000)
		ndelay(ns);
	else
		udelay(DIV_ROUND_UP(ns, 1000));
}

/*
 * The controller has many flags that could generate interrupts, most of them
 * are disabled and polling is used. For the very slow signals, using
 * interrupts may reduce the CPU load.
 */
static void marvell_nfc_disable_int(struct marvell_nfc *nfc, u32 int_mask)
{
	u32 reg;

	/* Writing 1 disables the interrupt */
	reg = readl_relaxed(nfc->regs + NDCR);
	writel_relaxed(reg | int_mask, nfc->regs + NDCR);
}

static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
{
	u32 reg;

	/* Writing 0 enables the interrupt */
	reg = readl_relaxed(nfc->regs + NDCR);
	writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
}

static u32 marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
{
	u32 reg;

	reg = readl_relaxed(nfc->regs + NDSR);
	writel_relaxed(int_mask, nfc->regs + NDSR);

	return reg & int_mask;
}

static void marvell_nfc_force_byte_access(struct nand_chip *chip,
					  bool force_8bit)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr;

	/*
	 * Callers of this function do not verify if the NAND is using a 16-bit
	 * or an 8-bit bus for normal operations, so we need to take care of
	 * that here by leaving the configuration unchanged if the NAND does
	 * not have the NAND_BUSWIDTH_16 flag set.
	 */
	if (!(chip->options & NAND_BUSWIDTH_16))
		return;

	ndcr = readl_relaxed(nfc->regs + NDCR);

	if (force_8bit)
		ndcr &= ~(NDCR_DWIDTH_M | NDCR_DWIDTH_C);
	else
		ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;

	writel_relaxed(ndcr, nfc->regs + NDCR);
}

static int marvell_nfc_wait_ndrun(struct nand_chip *chip)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 val;
	int ret;

	/*
	 * The command is being processed, wait for the ND_RUN bit to be
	 * cleared by the NFC. If not, we must clear it by hand.
	 */
	ret = readl_relaxed_poll_timeout(nfc->regs + NDCR, val,
					 (val & NDCR_ND_RUN) == 0,
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		dev_err(nfc->dev, "Timeout on NAND controller run mode\n");
		writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
			       nfc->regs + NDCR);
		return ret;
	}

	return 0;
}

/*
 * Any time a command has to be sent to the controller, the following sequence
 * has to be followed:
 * - call marvell_nfc_prepare_cmd()
 *      -> activate the ND_RUN bit that will kind of 'start a job'
 *      -> wait for the signal indicating the NFC is waiting for a command
 * - send the command (cmd and address cycles)
 * - possibly send or receive the data
 * - call marvell_nfc_end_cmd() with the corresponding flag
 *      -> wait for the flag to be triggered or cancel the job with a timeout
 *
 * The following helpers are here to factorize the code a bit so that
 * specialized functions responsible for executing the actual NAND
 * operations do not have to replicate the same code blocks.
 */
static int marvell_nfc_prepare_cmd(struct nand_chip *chip)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr, val;
	int ret;

	/* Poll ND_RUN and clear NDSR before issuing any command */
	ret = marvell_nfc_wait_ndrun(chip);
	if (ret) {
		dev_err(nfc->dev, "Last operation did not succeed\n");
		return ret;
	}

	ndcr = readl_relaxed(nfc->regs + NDCR);
	writel_relaxed(readl(nfc->regs + NDSR), nfc->regs + NDSR);

	/* Assert ND_RUN bit and wait for the NFC to be ready */
	writel_relaxed(ndcr | NDCR_ND_RUN, nfc->regs + NDCR);
	ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
					 val & NDSR_WRCMDREQ,
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		dev_err(nfc->dev, "Timeout on WRCMDREQ\n");
		return -ETIMEDOUT;
	}

	/* Command may be written, clear WRCMDREQ status bit */
	writel_relaxed(NDSR_WRCMDREQ, nfc->regs + NDSR);

	return 0;
}

static void marvell_nfc_send_cmd(struct nand_chip *chip,
				 struct marvell_nfc_op *nfc_op)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);

	dev_dbg(nfc->dev, "\nNDCR:  0x%08x\n"
		"NDCB0: 0x%08x\nNDCB1: 0x%08x\nNDCB2: 0x%08x\nNDCB3: 0x%08x\n",
		(u32)readl_relaxed(nfc->regs + NDCR), nfc_op->ndcb[0],
		nfc_op->ndcb[1], nfc_op->ndcb[2], nfc_op->ndcb[3]);

	writel_relaxed(to_nand_sel(marvell_nand)->ndcb0_csel | nfc_op->ndcb[0],
		       nfc->regs + NDCB0);
	writel_relaxed(nfc_op->ndcb[1], nfc->regs + NDCB0);
	writel(nfc_op->ndcb[2], nfc->regs + NDCB0);

	/*
	 * Write NDCB0 four times only if LEN_OVRD is set or if ADDR6 or ADDR7
	 * fields are used (only available on NFCv2).
	 */
	if (nfc_op->ndcb[0] & NDCB0_LEN_OVRD ||
	    NDCB0_ADDR_GET_NUM_CYC(nfc_op->ndcb[0]) >= 6) {
		if (!WARN_ON_ONCE(!nfc->caps->is_nfcv2))
			writel(nfc_op->ndcb[3], nfc->regs + NDCB0);
	}
}

static int marvell_nfc_end_cmd(struct nand_chip *chip, int flag,
			       const char *label)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 val;
	int ret;

	ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
					 val & flag,
					 POLL_PERIOD, POLL_TIMEOUT);

	if (ret) {
		dev_err(nfc->dev, "Timeout on %s (NDSR: 0x%08x)\n",
			label, val);
		if (nfc->dma_chan)
			dmaengine_terminate_all(nfc->dma_chan);
		return ret;
	}

	/*
	 * DMA function uses this helper to poll on CMDD bits without wanting
	 * them to be cleared.
	 */
	if (nfc->use_dma && (readl_relaxed(nfc->regs + NDCR) & NDCR_DMA_EN))
		return 0;

	writel_relaxed(flag, nfc->regs + NDSR);

	return 0;
}
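
/*
 * Putting the helpers above together, a typical command then looks like this
 * (a condensed sketch of the callers below, error handling elided; "nfc_op"
 * is assumed to be already filled):
 *
 *	ret = marvell_nfc_prepare_cmd(chip);
 *	if (ret)
 *		return ret;
 *	marvell_nfc_send_cmd(chip, &nfc_op);
 *	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ, "RDDREQ");
 */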

static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	int cs_flag = NDSR_CMDD(to_nand_sel(marvell_nand)->ndcb0_csel);

	return marvell_nfc_end_cmd(chip, cs_flag, "CMDD");
}

static int marvell_nfc_poll_status(struct marvell_nfc *nfc, u32 mask,
				   u32 expected_val, unsigned long timeout_ms)
{
	unsigned long limit;
	u32 st;

	limit = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		st = readl_relaxed(nfc->regs + NDSR);
		if (st & NDSR_RDY(1))
			st |= NDSR_RDY(0);

		if ((st & mask) == expected_val)
			return 0;

		cpu_relax();
	} while (time_after(limit, jiffies));

	return -ETIMEDOUT;
}

static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 pending;
	int ret;

	/* Timeout is expressed in ms */
	if (!timeout_ms)
		timeout_ms = IRQ_TIMEOUT;

	if (mtd->oops_panic_write) {
		ret = marvell_nfc_poll_status(nfc, NDSR_RDY(0),
					      NDSR_RDY(0),
					      timeout_ms);
	} else {
		init_completion(&nfc->complete);

		marvell_nfc_enable_int(nfc, NDCR_RDYM);
		ret = wait_for_completion_timeout(&nfc->complete,
						  msecs_to_jiffies(timeout_ms));
		marvell_nfc_disable_int(nfc, NDCR_RDYM);
	}
	pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));

	/*
	 * In case the interrupt was not served in the required time frame,
	 * check if the ISR was not served or if something actually went
	 * wrong.
	 */
	if (!ret && !pending) {
		dev_err(nfc->dev, "Timeout waiting for RB signal\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void marvell_nfc_select_target(struct nand_chip *chip,
				      unsigned int die_nr)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr_generic;

	/*
	 * Reset the NDCR register to a clean state for this particular chip,
	 * also clear ND_RUN bit.
	 */
	ndcr_generic = readl_relaxed(nfc->regs + NDCR) &
		       NDCR_GENERIC_FIELDS_MASK & ~NDCR_ND_RUN;
	writel_relaxed(ndcr_generic | marvell_nand->ndcr, nfc->regs + NDCR);

	/* Also reset the interrupt status register */
	marvell_nfc_clear_int(nfc, NDCR_ALL_INT);

	if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
		return;

	writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
	writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);

	nfc->selected_chip = chip;
	marvell_nand->selected_die = die_nr;
}

static irqreturn_t marvell_nfc_isr(int irq, void *dev_id)
{
	struct marvell_nfc *nfc = dev_id;
	u32 st = readl_relaxed(nfc->regs + NDSR);
	u32 ien = (~readl_relaxed(nfc->regs + NDCR)) & NDCR_ALL_INT;

	/*
	 * RDY interrupt mask is one bit in NDCR while there are two status
	 * bits in NDSR (RDY[cs0/cs2] and RDY[cs1/cs3]).
	 */
	if (st & NDSR_RDY(1))
		st |= NDSR_RDY(0);

	if (!(st & ien))
		return IRQ_NONE;

	marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);

	if (st & (NDSR_RDY(0) | NDSR_RDY(1)))
		complete(&nfc->complete);

	return IRQ_HANDLED;
}

/* HW ECC related functions */
static void marvell_nfc_enable_hw_ecc(struct nand_chip *chip)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr = readl_relaxed(nfc->regs + NDCR);

	if (!(ndcr & NDCR_ECC_EN)) {
		writel_relaxed(ndcr | NDCR_ECC_EN, nfc->regs + NDCR);

		/*
		 * When enabling BCH, set threshold to 0 to always know the
		 * number of corrected bitflips.
		 */
		if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
			writel_relaxed(NDECCCTRL_BCH_EN, nfc->regs + NDECCCTRL);
	}
}

static void marvell_nfc_disable_hw_ecc(struct nand_chip *chip)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr = readl_relaxed(nfc->regs + NDCR);

	if (ndcr & NDCR_ECC_EN) {
		writel_relaxed(ndcr & ~NDCR_ECC_EN, nfc->regs + NDCR);
		if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
			writel_relaxed(0, nfc->regs + NDECCCTRL);
	}
}

/* DMA related helpers */
static void marvell_nfc_enable_dma(struct marvell_nfc *nfc)
{
	u32 reg;

	reg = readl_relaxed(nfc->regs + NDCR);
	writel_relaxed(reg | NDCR_DMA_EN, nfc->regs + NDCR);
}

static void marvell_nfc_disable_dma(struct marvell_nfc *nfc)
{
	u32 reg;

	reg = readl_relaxed(nfc->regs + NDCR);
	writel_relaxed(reg & ~NDCR_DMA_EN, nfc->regs + NDCR);
}

/* Read/write PIO/DMA accessors */
static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
				     enum dma_data_direction direction,
				     unsigned int len)
{
	unsigned int dma_len = min_t(int, ALIGN(len, 32), MAX_CHUNK_SIZE);
	struct dma_async_tx_descriptor *tx;
	struct scatterlist sg;
	dma_cookie_t cookie;
	int ret;

	marvell_nfc_enable_dma(nfc);
	/* Prepare the DMA transfer */
	sg_init_one(&sg, nfc->dma_buf, dma_len);
	dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
	tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
				     direction == DMA_FROM_DEVICE ?
				     DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
		return -ENXIO;
	}

	/* Do the task and wait for it to finish */
	cookie = dmaengine_submit(tx);
	ret = dma_submit_error(cookie);
	if (ret)
		return -EIO;

	dma_async_issue_pending(nfc->dma_chan);
	ret = marvell_nfc_wait_cmdd(nfc->selected_chip);
	dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
	marvell_nfc_disable_dma(nfc);
	if (ret) {
		dev_err(nfc->dev, "Timeout waiting for DMA (status: %d)\n",
			dmaengine_tx_status(nfc->dma_chan, cookie, NULL));
		dmaengine_terminate_all(nfc->dma_chan);
		return -ETIMEDOUT;
	}

	return 0;
}

static int marvell_nfc_xfer_data_in_pio(struct marvell_nfc *nfc, u8 *in,
					unsigned int len)
{
	unsigned int last_len = len % FIFO_DEPTH;
	unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
	int i;

	for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
		ioread32_rep(nfc->regs + NDDB, in + i, FIFO_REP(FIFO_DEPTH));

	if (last_len) {
		u8 tmp_buf[FIFO_DEPTH];

		ioread32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
		memcpy(in + last_full_offset, tmp_buf, last_len);
	}

	return 0;
}

static int marvell_nfc_xfer_data_out_pio(struct marvell_nfc *nfc, const u8 *out,
					 unsigned int len)
{
	unsigned int last_len = len % FIFO_DEPTH;
	unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
	int i;

	for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
		iowrite32_rep(nfc->regs + NDDB, out + i, FIFO_REP(FIFO_DEPTH));

	if (last_len) {
		u8 tmp_buf[FIFO_DEPTH];

		memcpy(tmp_buf, out + last_full_offset, last_len);
		iowrite32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
	}

	return 0;
}

static void marvell_nfc_check_empty_chunk(struct nand_chip *chip,
					  u8 *data, int data_len,
					  u8 *spare, int spare_len,
					  u8 *ecc, int ecc_len,
					  unsigned int *max_bitflips)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int bf;

	/*
	 * Blank pages (all 0xFF) that have not been written may be recognized
	 * as bad if bitflips occur, so whenever an uncorrectable error occurs,
	 * check if the entire page (with ECC bytes) is actually blank or not.
	 */
	if (!data)
		data_len = 0;
	if (!spare)
		spare_len = 0;
	if (!ecc)
		ecc_len = 0;

	bf = nand_check_erased_ecc_chunk(data, data_len, ecc, ecc_len,
					 spare, spare_len, chip->ecc.strength);
	if (bf < 0) {
		mtd->ecc_stats.failed++;
		return;
	}

	/* Update the stats and max_bitflips */
	mtd->ecc_stats.corrected += bf;
	*max_bitflips = max_t(unsigned int, *max_bitflips, bf);
}

/*
 * Check if a chunk is correct or not according to the hardware ECC engine.
 * mtd->ecc_stats.corrected is updated, as well as max_bitflips, however
 * mtd->ecc_stats.failed is not: the function will instead return a non-zero
 * value indicating that a check on the emptiness of the subpage must be
 * performed before actually declaring the subpage as "corrupted".
 */
static int marvell_nfc_hw_ecc_check_bitflips(struct nand_chip *chip,
					     unsigned int *max_bitflips)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	int bf = 0;
	u32 ndsr;

	ndsr = readl_relaxed(nfc->regs + NDSR);

	/* Check uncorrectable error flag */
	if (ndsr & NDSR_UNCERR) {
		writel_relaxed(ndsr, nfc->regs + NDSR);

		/*
		 * Do not increment ->ecc_stats.failed now, instead, return a
		 * non-zero value to indicate that this chunk was apparently
		 * bad, and it should be checked to see if it is empty or not.
		 * If the chunk (with ECC bytes) is not declared empty, the
		 * calling function must increment the failure count.
		 */
		return -EBADMSG;
	}

	/* Check correctable error flag */
	if (ndsr & NDSR_CORERR) {
		writel_relaxed(ndsr, nfc->regs + NDSR);

		if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
			bf = NDSR_ERRCNT(ndsr);
		else
			bf = 1;
	}

	/* Update the stats and max_bitflips */
	mtd->ecc_stats.corrected += bf;
	*max_bitflips = max_t(unsigned int, *max_bitflips, bf);

	return 0;
}

/* Hamming read helpers */
static int marvell_nfc_hw_ecc_hmg_do_read_page(struct nand_chip *chip,
					       u8 *data_buf, u8 *oob_buf,
					       bool raw, int page)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
			   NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
			   NDCB0_DBC |
			   NDCB0_CMD1(NAND_CMD_READ0) |
			   NDCB0_CMD2(NAND_CMD_READSTART),
		.ndcb[1] = NDCB1_ADDRS_PAGE(page),
		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
	};
	unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
	int ret;

	/* NFCv2 needs more information about the operation being executed */
	if (nfc->caps->is_nfcv2)
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				  "RDDREQ while draining FIFO (data/oob)");
	if (ret)
		return ret;

	/*
	 * Read the page then the OOB area. Unlike what is shown in current
	 * documentation, spare bytes are protected by the ECC engine, and must
	 * be at the beginning of the OOB area or running this driver on legacy
	 * systems will prevent the discovery of the BBM/BBT.
	 */
	if (nfc->use_dma) {
		marvell_nfc_xfer_data_dma(nfc, DMA_FROM_DEVICE,
					  lt->data_bytes + oob_bytes);
		memcpy(data_buf, nfc->dma_buf, lt->data_bytes);
		memcpy(oob_buf, nfc->dma_buf + lt->data_bytes, oob_bytes);
	} else {
		marvell_nfc_xfer_data_in_pio(nfc, data_buf, lt->data_bytes);
		marvell_nfc_xfer_data_in_pio(nfc, oob_buf, oob_bytes);
	}

	ret = marvell_nfc_wait_cmdd(chip);
	return ret;
}

static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct nand_chip *chip, u8 *buf,
						int oob_required, int page)
{
	marvell_nfc_select_target(chip, chip->cur_cs);
	return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
						   true, page);
}

static int marvell_nfc_hw_ecc_hmg_read_page(struct nand_chip *chip, u8 *buf,
					    int oob_required, int page)
{
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	unsigned int full_sz = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
	int max_bitflips = 0, ret;
	u8 *raw_buf;

	marvell_nfc_select_target(chip, chip->cur_cs);
	marvell_nfc_enable_hw_ecc(chip);
	marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
					    page);
	ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips);
	marvell_nfc_disable_hw_ecc(chip);

	if (!ret)
		return max_bitflips;

	/*
	 * When ECC failures are detected, check if the full page has been
	 * written or not. Ignore the failure if it is actually empty.
	 */
	raw_buf = kmalloc(full_sz, GFP_KERNEL);
	if (!raw_buf)
		return -ENOMEM;

	marvell_nfc_hw_ecc_hmg_do_read_page(chip, raw_buf, raw_buf +
					    lt->data_bytes, true, page);
	marvell_nfc_check_empty_chunk(chip, raw_buf, full_sz, NULL, 0, NULL, 0,
				      &max_bitflips);
	kfree(raw_buf);

	return max_bitflips;
}

/*
 * Spare area in Hamming layouts is not protected by the ECC engine (even if
 * it appears before the ECC bytes when reading), so the ->read_oob_raw()
 * function also stands for ->read_oob().
 */
static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct nand_chip *chip, int page)
{
	u8 *buf = nand_get_data_buf(chip);

	marvell_nfc_select_target(chip, chip->cur_cs);
	return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
						   true, page);
}

/* Hamming write helpers */
static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
						const u8 *data_buf,
						const u8 *oob_buf, bool raw,
						int page)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) |
			   NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
			   NDCB0_CMD1(NAND_CMD_SEQIN) |
			   NDCB0_CMD2(NAND_CMD_PAGEPROG) |
			   NDCB0_DBC,
		.ndcb[1] = NDCB1_ADDRS_PAGE(page),
		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
	};
	unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
	u8 status;
	int ret;

	/* NFCv2 needs more information about the operation being executed */
	if (nfc->caps->is_nfcv2)
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
				  "WRDREQ while loading FIFO (data)");
	if (ret)
		return ret;

	/* Write the page then the OOB area */
	if (nfc->use_dma) {
		memcpy(nfc->dma_buf, data_buf, lt->data_bytes);
		memcpy(nfc->dma_buf + lt->data_bytes, oob_buf, oob_bytes);
		marvell_nfc_xfer_data_dma(nfc, DMA_TO_DEVICE, lt->data_bytes +
					  lt->ecc_bytes + lt->spare_bytes);
	} else {
		marvell_nfc_xfer_data_out_pio(nfc, data_buf, lt->data_bytes);
		marvell_nfc_xfer_data_out_pio(nfc, oob_buf, oob_bytes);
	}

	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	ret = marvell_nfc_wait_op(chip,
				  PSEC_TO_MSEC(sdr->tPROG_max));
	if (ret)
		return ret;

	/* Check write status on the chip side */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}

static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip,
						 const u8 *buf,
						 int oob_required, int page)
{
	marvell_nfc_select_target(chip, chip->cur_cs);
	return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
						    true, page);
}

static int marvell_nfc_hw_ecc_hmg_write_page(struct nand_chip *chip,
					     const u8 *buf,
					     int oob_required, int page)
{
	int ret;

	marvell_nfc_select_target(chip, chip->cur_cs);
	marvell_nfc_enable_hw_ecc(chip);
	ret = marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
						   false, page);
	marvell_nfc_disable_hw_ecc(chip);

	return ret;
}

/*
 * Spare area in Hamming layouts is not protected by the ECC engine (even if
 * it appears before the ECC bytes when reading), so the ->write_oob_raw()
 * function also stands for ->write_oob().
 */
static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct nand_chip *chip,
						int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *buf = nand_get_data_buf(chip);

	memset(buf, 0xFF, mtd->writesize);

	marvell_nfc_select_target(chip, chip->cur_cs);
	return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
						    true, page);
}

/* BCH read helpers */
static int marvell_nfc_hw_ecc_bch_read_page_raw(struct nand_chip *chip, u8 *buf,
						int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	u8 *oob = chip->oob_poi;
	int chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
	int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
		lt->last_spare_bytes;
	int data_len = lt->data_bytes;
	int spare_len = lt->spare_bytes;
	int ecc_len = lt->ecc_bytes;
	int chunk;

	marvell_nfc_select_target(chip, chip->cur_cs);

	if (oob_required)
		memset(chip->oob_poi, 0xFF, mtd->oobsize);

	nand_read_page_op(chip, page, 0, NULL, 0);

	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		/* Update last chunk length */
		if (chunk >= lt->full_chunk_cnt) {
			data_len = lt->last_data_bytes;
			spare_len = lt->last_spare_bytes;
			ecc_len = lt->last_ecc_bytes;
		}

		/* Read data bytes */
		nand_change_read_column_op(chip, chunk * chunk_size,
					   buf + (lt->data_bytes * chunk),
					   data_len, false);

		/* Read spare bytes */
		nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
				  spare_len, false, false);

		/* Read ECC bytes */
		nand_read_data_op(chip, oob + ecc_offset +
				  (ALIGN(lt->ecc_bytes, 32) * chunk),
				  ecc_len, false, false);
	}

	return 0;
}

static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
					      u8 *data, unsigned int data_len,
					      u8 *spare, unsigned int spare_len,
					      int page)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	int i, ret;
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
			   NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
			   NDCB0_LEN_OVRD,
		.ndcb[1] = NDCB1_ADDRS_PAGE(page),
		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
		.ndcb[3] = data_len + spare_len,
	};

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return;

	if (chunk == 0)
		nfc_op.ndcb[0] |= NDCB0_DBC |
				  NDCB0_CMD1(NAND_CMD_READ0) |
				  NDCB0_CMD2(NAND_CMD_READSTART);

	/*
	 * Trigger the monolithic read on the first chunk, then naked read on
	 * intermediate chunks and finally a last naked read on the last chunk.
	 */
	if (chunk == 0)
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
	else if (chunk < lt->nchunks - 1)
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
	else
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);

	marvell_nfc_send_cmd(chip, &nfc_op);

	/*
	 * According to the datasheet, when reading from NDDB
	 * with BCH enabled, after each 32-byte read, we
	 * have to make sure that the NDSR.RDDREQ bit is set.
	 *
	 * Drain the FIFO, 8 32-bit reads at a time, and skip
	 * the polling on the last read.
	 *
	 * Length is a multiple of 32 bytes, hence it is a multiple of 8 too.
	 */
	for (i = 0; i < data_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
		marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				    "RDDREQ while draining FIFO (data)");
		marvell_nfc_xfer_data_in_pio(nfc, data,
					     FIFO_DEPTH * BCH_SEQ_READS);
		data += FIFO_DEPTH * BCH_SEQ_READS;
	}

	for (i = 0; i < spare_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
		marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				    "RDDREQ while draining FIFO (OOB)");
		marvell_nfc_xfer_data_in_pio(nfc, spare,
					     FIFO_DEPTH * BCH_SEQ_READS);
		spare += FIFO_DEPTH * BCH_SEQ_READS;
	}
}

static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip,
					    u8 *buf, int oob_required,
					    int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	int data_len = lt->data_bytes, spare_len = lt->spare_bytes;
	u8 *data = buf, *spare = chip->oob_poi;
	int max_bitflips = 0;
	u32 failure_mask = 0;
	int chunk, ret;

	marvell_nfc_select_target(chip, chip->cur_cs);

	/*
	 * With BCH, OOB is not fully used (and thus not read entirely):
	 * unexpected bytes could show up at the end of the OOB buffer if
	 * not explicitly erased.
	 */
	if (oob_required)
		memset(chip->oob_poi, 0xFF, mtd->oobsize);

	marvell_nfc_enable_hw_ecc(chip);

	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		/* Update length for the last chunk */
		if (chunk >= lt->full_chunk_cnt) {
			data_len = lt->last_data_bytes;
			spare_len = lt->last_spare_bytes;
		}

		/* Read the chunk and detect number of bitflips */
		marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
						  spare, spare_len, page);
		ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips);
		if (ret)
			failure_mask |= BIT(chunk);

		data += data_len;
		spare += spare_len;
	}

	marvell_nfc_disable_hw_ecc(chip);

	if (!failure_mask)
		return max_bitflips;

	/*
	 * Please note that dumping the ECC bytes during a normal read with OOB
	 * area would add a significant overhead as ECC bytes are "consumed" by
	 * the controller in normal mode and must be re-read in raw mode. To
	 * avoid degrading performance, we prefer not to include them. The
	 * user should re-read the page in raw mode if ECC bytes are required.
	 */

	/*
	 * In case there is any subpage read error, we usually re-read only ECC
	 * bytes in raw mode and check if the whole page is empty. In this case,
	 * it is normal that the ECC check failed and we just ignore the error.
	 *
	 * However, it has been empirically observed that for some layouts (e.g.
	 * 2k page, 8b strength per 512B chunk), the controller tries to correct
	 * bits and may itself create bitflips in the erased area. To overcome
	 * this strange behavior, the whole page is re-read in raw mode, not
	 * only the ECC bytes.
	 */
	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		int data_off_in_page, spare_off_in_page, ecc_off_in_page;
		int data_off, spare_off, ecc_off;
		int data_len, spare_len, ecc_len;

		/* No failure reported for this chunk, move to the next one */
		if (!(failure_mask & BIT(chunk)))
			continue;

		data_off_in_page = chunk * (lt->data_bytes + lt->spare_bytes +
					    lt->ecc_bytes);
		spare_off_in_page = data_off_in_page +
			(chunk < lt->full_chunk_cnt ? lt->data_bytes :
						      lt->last_data_bytes);
		ecc_off_in_page = spare_off_in_page +
			(chunk < lt->full_chunk_cnt ? lt->spare_bytes :
						      lt->last_spare_bytes);

		data_off = chunk * lt->data_bytes;
		spare_off = chunk * lt->spare_bytes;
		ecc_off = (lt->full_chunk_cnt * lt->spare_bytes) +
			  lt->last_spare_bytes +
			  (chunk * (lt->ecc_bytes + 2));

		data_len = chunk < lt->full_chunk_cnt ? lt->data_bytes :
							lt->last_data_bytes;
		spare_len = chunk < lt->full_chunk_cnt ? lt->spare_bytes :
							 lt->last_spare_bytes;
		ecc_len = chunk < lt->full_chunk_cnt ? lt->ecc_bytes :
						       lt->last_ecc_bytes;

		/*
		 * Only re-read the ECC bytes, unless we are using the 2k/8b
		 * layout which is buggy in the sense that the ECC engine will
		 * try to correct data bytes anyway, creating bitflips. In this
		 * case, re-read the entire page.
		 */
		if (lt->writesize == 2048 && lt->strength == 8) {
			nand_change_read_column_op(chip, data_off_in_page,
						   buf + data_off, data_len,
						   false);
			nand_change_read_column_op(chip, spare_off_in_page,
						   chip->oob_poi + spare_off, spare_len,
						   false);
		}

		nand_change_read_column_op(chip, ecc_off_in_page,
					   chip->oob_poi + ecc_off, ecc_len,
					   false);

		/* Check the entire chunk (data + spare + ecc) for emptiness */
		marvell_nfc_check_empty_chunk(chip, buf + data_off, data_len,
					      chip->oob_poi + spare_off, spare_len,
					      chip->oob_poi + ecc_off, ecc_len,
					      &max_bitflips);
	}

	return max_bitflips;
}

static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct nand_chip *chip, int page)
{
	u8 *buf = nand_get_data_buf(chip);

	return chip->ecc.read_page_raw(chip, buf, true, page);
}

static int marvell_nfc_hw_ecc_bch_read_oob(struct nand_chip *chip, int page)
{
	u8 *buf = nand_get_data_buf(chip);

	return chip->ecc.read_page(chip, buf, true, page);
}

/* BCH write helpers */
static int marvell_nfc_hw_ecc_bch_write_page_raw(struct nand_chip *chip,
						 const u8 *buf,
						 int oob_required, int page)
{
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	int full_chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
	int data_len = lt->data_bytes;
	int spare_len = lt->spare_bytes;
	int ecc_len = lt->ecc_bytes;
	int spare_offset = 0;
	int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
		lt->last_spare_bytes;
	int chunk;

	marvell_nfc_select_target(chip, chip->cur_cs);

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		if (chunk >= lt->full_chunk_cnt) {
			data_len = lt->last_data_bytes;
			spare_len = lt->last_spare_bytes;
			ecc_len = lt->last_ecc_bytes;
		}

		/* Point to the column of the next chunk */
		nand_change_write_column_op(chip, chunk * full_chunk_size,
					    NULL, 0, false);

		/* Write the data */
		nand_write_data_op(chip, buf + (chunk * lt->data_bytes),
				   data_len, false);

		if (!oob_required)
			continue;

		/* Write the spare bytes */
		if (spare_len)
			nand_write_data_op(chip, chip->oob_poi + spare_offset,
					   spare_len, false);

		/* Write the ECC bytes */
		if (ecc_len)
			nand_write_data_op(chip, chip->oob_poi + ecc_offset,
					   ecc_len, false);

		spare_offset += spare_len;
		ecc_offset += ALIGN(ecc_len, 32);
	}

	return nand_prog_page_end_op(chip);
}

static int
marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
				   const u8 *data, unsigned int data_len,
				   const u8 *spare, unsigned int spare_len,
				   int page)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	u32 xtype;
	int ret;
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
		.ndcb[3] = data_len + spare_len,
	};

	/*
	 * The first operation dispatches the CMD_SEQIN command, issues the
	 * address cycles and asks for the first chunk of data.
	 * All operations in the middle (if any) will issue a naked write and
	 * also ask for data.
	 * The last operation (if any) asks for the last chunk of data through
	 * a last naked write.
	 */
	if (chunk == 0) {
		if (lt->nchunks == 1)
			xtype = XTYPE_MONOLITHIC_RW;
		else
			xtype = XTYPE_WRITE_DISPATCH;

		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype) |
				  NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
				  NDCB0_CMD1(NAND_CMD_SEQIN);
		nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
		nfc_op.ndcb[2] |= NDCB2_ADDR5_PAGE(page);
	} else if (chunk < lt->nchunks - 1) {
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
	} else {
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
	}

	/* Always dispatch the PAGEPROG command on the last chunk */
	if (chunk == lt->nchunks - 1)
		nfc_op.ndcb[0] |= NDCB0_CMD2(NAND_CMD_PAGEPROG) | NDCB0_DBC;

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
				  "WRDREQ while loading FIFO (data)");
	if (ret)
		return ret;

	/* Transfer the contents */
	iowrite32_rep(nfc->regs + NDDB, data, FIFO_REP(data_len));
	iowrite32_rep(nfc->regs + NDDB, spare, FIFO_REP(spare_len));

	return 0;
}

static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
					     const u8 *buf,
					     int oob_required, int page)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	const u8 *data = buf;
	const u8 *spare = chip->oob_poi;
	int data_len = lt->data_bytes;
	int spare_len = lt->spare_bytes;
	int chunk, ret;
	u8 status;

	marvell_nfc_select_target(chip, chip->cur_cs);

	/* Spare data will be written anyway, so clear it to avoid garbage */
	if (!oob_required)
		memset(chip->oob_poi, 0xFF, mtd->oobsize);

	marvell_nfc_enable_hw_ecc(chip);

	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		if (chunk >= lt->full_chunk_cnt) {
			data_len = lt->last_data_bytes;
			spare_len = lt->last_spare_bytes;
		}

		marvell_nfc_hw_ecc_bch_write_chunk(chip, chunk, data, data_len,
						   spare, spare_len, page);
		data += data_len;
		spare += spare_len;

		/*
		 * Waiting only for CMDD or PAGED is not enough, the ECC bytes
		 * are only partially written at that point. No flag is set
		 * once the operation is really finished but the ND_RUN bit is
		 * cleared, so wait for it before stepping into the next
		 * command.
		 */
1652		marvell_nfc_wait_ndrun(chip);
1653	}
1654
1655	ret = marvell_nfc_wait_op(chip, PSEC_TO_MSEC(sdr->tPROG_max));
1656
1657	marvell_nfc_disable_hw_ecc(chip);
1658
1659	if (ret)
1660		return ret;
1661
1662	/* Check write status on the chip side */
1663	ret = nand_status_op(chip, &status);
1664	if (ret)
1665		return ret;
1666
1667	if (status & NAND_STATUS_FAIL)
1668		return -EIO;
1669
1670	return 0;
1671}
1672
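/*
 * OOB-only accesses are not supported by the controller, so the two
 * helpers below emulate them with full-page writes whose data area is
 * filled with 0xFF.
 */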
static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct nand_chip *chip,
						int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *buf = nand_get_data_buf(chip);

	memset(buf, 0xFF, mtd->writesize);

	return chip->ecc.write_page_raw(chip, buf, true, page);
}

static int marvell_nfc_hw_ecc_bch_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *buf = nand_get_data_buf(chip);

	memset(buf, 0xFF, mtd->writesize);

	return chip->ecc.write_page(chip, buf, true, page);
}

/* NAND framework ->exec_op() hooks and related helpers */
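/*
 * Fill a marvell_nfc_op structure from a generic NAND subop: command
 * opcodes and address cycles are encoded into the NDCB0-NDCB2 command
 * buffer words, data instructions select the command type (and, on
 * NFCv2, a length override in NDCB3), and the delays requested by the
 * core are recorded so the ->exec() helpers can apply them at the
 * right time.
 */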
static void marvell_nfc_parse_instructions(struct nand_chip *chip,
					   const struct nand_subop *subop,
					   struct marvell_nfc_op *nfc_op)
{
	const struct nand_op_instr *instr = NULL;
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	bool first_cmd = true;
	unsigned int op_id;
	int i;

	/* Reset the input structure as most of its fields will be OR'ed */
	memset(nfc_op, 0, sizeof(struct marvell_nfc_op));

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		unsigned int offset, naddrs;
		const u8 *addrs;
		int len;

		instr = &subop->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			if (first_cmd)
				nfc_op->ndcb[0] |=
					NDCB0_CMD1(instr->ctx.cmd.opcode);
			else
				nfc_op->ndcb[0] |=
					NDCB0_CMD2(instr->ctx.cmd.opcode) |
					NDCB0_DBC;

			nfc_op->cle_ale_delay_ns = instr->delay_ns;
			first_cmd = false;
			break;

		case NAND_OP_ADDR_INSTR:
			offset = nand_subop_get_addr_start_off(subop, op_id);
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];

			nfc_op->ndcb[0] |= NDCB0_ADDR_CYC(naddrs);

			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				nfc_op->ndcb[1] |= addrs[i] << (8 * i);

			if (naddrs >= 5)
				nfc_op->ndcb[2] |= NDCB2_ADDR5_CYC(addrs[4]);
			if (naddrs >= 6)
				nfc_op->ndcb[3] |= NDCB3_ADDR6_CYC(addrs[5]);
			if (naddrs == 7)
				nfc_op->ndcb[3] |= NDCB3_ADDR7_CYC(addrs[6]);

			nfc_op->cle_ale_delay_ns = instr->delay_ns;
			break;

		case NAND_OP_DATA_IN_INSTR:
			nfc_op->data_instr = instr;
			nfc_op->data_instr_idx = op_id;
			nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ);
			if (nfc->caps->is_nfcv2) {
				nfc_op->ndcb[0] |=
					NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
					NDCB0_LEN_OVRD;
				len = nand_subop_get_data_len(subop, op_id);
				nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
			}
			nfc_op->data_delay_ns = instr->delay_ns;
			break;

		case NAND_OP_DATA_OUT_INSTR:
			nfc_op->data_instr = instr;
			nfc_op->data_instr_idx = op_id;
			nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE);
			if (nfc->caps->is_nfcv2) {
				nfc_op->ndcb[0] |=
					NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
					NDCB0_LEN_OVRD;
				len = nand_subop_get_data_len(subop, op_id);
				nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
			}
			nfc_op->data_delay_ns = instr->delay_ns;
			break;

		case NAND_OP_WAITRDY_INSTR:
			nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
			nfc_op->rdy_delay_ns = instr->delay_ns;
			break;
		}
	}
}

static int marvell_nfc_xfer_data_pio(struct nand_chip *chip,
				     const struct nand_subop *subop,
				     struct marvell_nfc_op *nfc_op)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct nand_op_instr *instr = nfc_op->data_instr;
	unsigned int op_id = nfc_op->data_instr_idx;
	unsigned int len = nand_subop_get_data_len(subop, op_id);
	unsigned int offset = nand_subop_get_data_start_off(subop, op_id);
	bool reading = (instr->type == NAND_OP_DATA_IN_INSTR);
	int ret;

	if (instr->ctx.data.force_8bit)
		marvell_nfc_force_byte_access(chip, true);

	if (reading) {
		u8 *in = instr->ctx.data.buf.in + offset;

		ret = marvell_nfc_xfer_data_in_pio(nfc, in, len);
	} else {
		const u8 *out = instr->ctx.data.buf.out + offset;

		ret = marvell_nfc_xfer_data_out_pio(nfc, out, len);
	}

	if (instr->ctx.data.force_8bit)
		marvell_nfc_force_byte_access(chip, false);

	return ret;
}

static int marvell_nfc_monolithic_access_exec(struct nand_chip *chip,
					      const struct nand_subop *subop)
{
	struct marvell_nfc_op nfc_op;
	bool reading;
	int ret;

	marvell_nfc_parse_instructions(chip, subop, &nfc_op);
	reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
				  "RDDREQ/WRDREQ while draining raw data");
	if (ret)
		return ret;

	cond_delay(nfc_op.cle_ale_delay_ns);

	if (reading) {
		if (nfc_op.rdy_timeout_ms) {
			ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
			if (ret)
				return ret;
		}

		cond_delay(nfc_op.rdy_delay_ns);
	}

	marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	cond_delay(nfc_op.data_delay_ns);

	if (!reading) {
		if (nfc_op.rdy_timeout_ms) {
			ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
			if (ret)
				return ret;
		}

		cond_delay(nfc_op.rdy_delay_ns);
	}

	/*
	 * The NDCR ND_RUN bit should be cleared automatically at the end of
	 * each operation, but experience shows that the behavior is buggy
	 * when it comes to writes (with LEN_OVRD). Clear it by hand in this
	 * case.
	 */
	if (!reading) {
		struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);

		writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
			       nfc->regs + NDCR);
	}

	return 0;
}

static int marvell_nfc_naked_access_exec(struct nand_chip *chip,
					 const struct nand_subop *subop)
{
	struct marvell_nfc_op nfc_op;
	int ret;

	marvell_nfc_parse_instructions(chip, subop, &nfc_op);

	/*
	 * Naked accesses are different in that they need to be flagged as
	 * naked by the controller. Reset the controller register fields that
	 * encode the type and refill them according to the ongoing operation.
	 */
	nfc_op.ndcb[0] &= ~(NDCB0_CMD_TYPE(TYPE_MASK) |
			    NDCB0_CMD_XTYPE(XTYPE_MASK));
	switch (subop->instrs[0].type) {
	case NAND_OP_CMD_INSTR:
		nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_CMD);
		break;
	case NAND_OP_ADDR_INSTR:
		nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_ADDR);
		break;
	case NAND_OP_DATA_IN_INSTR:
		nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ) |
				  NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
		break;
	case NAND_OP_DATA_OUT_INSTR:
		nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE) |
				  NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
		break;
	default:
		/* This should never happen */
		break;
	}

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);

	if (!nfc_op.data_instr) {
		ret = marvell_nfc_wait_cmdd(chip);
		cond_delay(nfc_op.cle_ale_delay_ns);
		return ret;
	}

	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
				  "RDDREQ/WRDREQ while draining raw data");
	if (ret)
		return ret;

	marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	/*
	 * The NDCR ND_RUN bit should be cleared automatically at the end of
	 * each operation, but experience shows that the behavior is buggy
	 * when it comes to writes (with LEN_OVRD). Clear it by hand in this
	 * case.
	 */
	if (subop->instrs[0].type == NAND_OP_DATA_OUT_INSTR) {
		struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);

		writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
			       nfc->regs + NDCR);
	}

	return 0;
}

static int marvell_nfc_naked_waitrdy_exec(struct nand_chip *chip,
					  const struct nand_subop *subop)
{
	struct marvell_nfc_op nfc_op;
	int ret;

	marvell_nfc_parse_instructions(chip, subop, &nfc_op);

	ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
	cond_delay(nfc_op.rdy_delay_ns);

	return ret;
}

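/*
 * The controller has dedicated command types for a few well-known
 * operations (READ ID, STATUS, RESET, ERASE). The helpers below
 * override the command type derived from the parsed instructions
 * accordingly before issuing the operation.
 */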
static int marvell_nfc_read_id_type_exec(struct nand_chip *chip,
					 const struct nand_subop *subop)
{
	struct marvell_nfc_op nfc_op;
	int ret;

	marvell_nfc_parse_instructions(chip, subop, &nfc_op);
	nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
	nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ_ID);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				  "RDDREQ while reading ID");
	if (ret)
		return ret;

	cond_delay(nfc_op.cle_ale_delay_ns);

	if (nfc_op.rdy_timeout_ms) {
		ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
		if (ret)
			return ret;
	}

	cond_delay(nfc_op.rdy_delay_ns);

	marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	cond_delay(nfc_op.data_delay_ns);

	return 0;
}

static int marvell_nfc_read_status_exec(struct nand_chip *chip,
					const struct nand_subop *subop)
{
	struct marvell_nfc_op nfc_op;
	int ret;

	marvell_nfc_parse_instructions(chip, subop, &nfc_op);
	nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
	nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_STATUS);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				  "RDDREQ while reading status");
	if (ret)
		return ret;

	cond_delay(nfc_op.cle_ale_delay_ns);

	if (nfc_op.rdy_timeout_ms) {
		ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
		if (ret)
			return ret;
	}

	cond_delay(nfc_op.rdy_delay_ns);

	marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	cond_delay(nfc_op.data_delay_ns);

	return 0;
}

static int marvell_nfc_reset_cmd_type_exec(struct nand_chip *chip,
					   const struct nand_subop *subop)
{
	struct marvell_nfc_op nfc_op;
	int ret;

	marvell_nfc_parse_instructions(chip, subop, &nfc_op);
	nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_RESET);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	cond_delay(nfc_op.cle_ale_delay_ns);

	ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
	if (ret)
		return ret;

	cond_delay(nfc_op.rdy_delay_ns);

	return 0;
}

static int marvell_nfc_erase_cmd_type_exec(struct nand_chip *chip,
					   const struct nand_subop *subop)
{
	struct marvell_nfc_op nfc_op;
	int ret;

	marvell_nfc_parse_instructions(chip, subop, &nfc_op);
	nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_ERASE);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	cond_delay(nfc_op.cle_ale_delay_ns);

	ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
	if (ret)
		return ret;

	cond_delay(nfc_op.rdy_delay_ns);

	return 0;
}

static const struct nand_op_parser marvell_nfcv2_op_parser = NAND_OP_PARSER(
	/* Monolithic reads/writes */
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_monolithic_access_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC_NFCV2),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_monolithic_access_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2),
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	/* Naked commands */
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_naked_access_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_naked_access_exec,
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_naked_access_exec,
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_naked_access_exec,
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_naked_waitrdy_exec,
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	);
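/*
 * The patterns above are tried in order: a full page read
 * (CMD + ADDR + CMD + WAITRDY + DATA_IN) matches the first monolithic
 * pattern, a page write the second one, while anything else gets
 * decomposed by the core into the naked patterns.
 */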

static const struct nand_op_parser marvell_nfcv1_op_parser = NAND_OP_PARSER(
	/* Naked commands not supported, use a function for each pattern */
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_read_id_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_erase_cmd_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_read_status_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_reset_cmd_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_naked_waitrdy_exec,
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	);
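/*
 * NFCv1 cannot issue naked operations, hence only the well-known fixed
 * sequences above (READ ID, ERASE, STATUS, RESET) plus a bare WAITRDY
 * are supported.
 */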

static int marvell_nfc_exec_op(struct nand_chip *chip,
			       const struct nand_operation *op,
			       bool check_only)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);

	if (!check_only)
		marvell_nfc_select_target(chip, op->cs);

	if (nfc->caps->is_nfcv2)
		return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
					      op, check_only);
	else
		return nand_op_parser_exec_op(chip, &marvell_nfcv1_op_parser,
					      op, check_only);
}

/*
 * Layouts were broken in the old pxa3xx_nand driver; these are supposed
 * to be usable.
 */
static int marvell_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;

	if (section)
		return -ERANGE;

	oobregion->length = (lt->full_chunk_cnt * lt->ecc_bytes) +
			    lt->last_ecc_bytes;
	oobregion->offset = mtd->oobsize - oobregion->length;

	return 0;
}

static int marvell_nand_ooblayout_free(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;

	if (section)
		return -ERANGE;

	/*
	 * The bootrom looks in bytes 0 & 5 for bad blocks for the
	 * 4KB page / 4bit BCH combination.
	 */
	if (mtd->writesize == SZ_4K && lt->data_bytes == SZ_2K)
		oobregion->offset = 6;
	else
		oobregion->offset = 2;

	oobregion->length = (lt->full_chunk_cnt * lt->spare_bytes) +
			    lt->last_spare_bytes - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = {
	.ecc = marvell_nand_ooblayout_ecc,
	.free = marvell_nand_ooblayout_free,
};
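/*
 * The callbacks above expose a single region each: all ECC bytes are
 * grouped at the end of the OOB area, and the free bytes sit right
 * before them, minus the first bytes reserved for bad block markers.
 */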

static int marvell_nand_hw_ecc_controller_init(struct mtd_info *mtd,
					       struct nand_ecc_ctrl *ecc)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *l;
	int i;

	if (!nfc->caps->is_nfcv2 &&
	    (mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)) {
		dev_err(nfc->dev,
			"NFCv1: writesize (%d) cannot be bigger than a chunk (%d)\n",
			mtd->writesize, MAX_CHUNK_SIZE - mtd->oobsize);
		return -ENOTSUPP;
	}

	to_marvell_nand(chip)->layout = NULL;
	for (i = 0; i < ARRAY_SIZE(marvell_nfc_layouts); i++) {
		l = &marvell_nfc_layouts[i];
		if (mtd->writesize == l->writesize &&
		    ecc->size == l->chunk && ecc->strength == l->strength) {
			to_marvell_nand(chip)->layout = l;
			break;
		}
	}

	if (!to_marvell_nand(chip)->layout ||
	    (!nfc->caps->is_nfcv2 && ecc->strength > 1)) {
		dev_err(nfc->dev,
			"ECC strength %d at page size %d is not supported\n",
			ecc->strength, mtd->writesize);
		return -ENOTSUPP;
	}

	/* Special care for the 2k/8-bit/512B layout */
	if (l->writesize == 2048 && l->strength == 8) {
		if (mtd->oobsize < 128) {
			dev_err(nfc->dev, "Requested layout needs at least 128 OOB bytes\n");
			return -ENOTSUPP;
		}

		chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
	}

	mtd_set_ooblayout(mtd, &marvell_nand_ooblayout_ops);
	ecc->steps = l->nchunks;
	ecc->size = l->data_bytes;

	if (ecc->strength == 1) {
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
		ecc->read_page_raw = marvell_nfc_hw_ecc_hmg_read_page_raw;
		ecc->read_page = marvell_nfc_hw_ecc_hmg_read_page;
		ecc->read_oob_raw = marvell_nfc_hw_ecc_hmg_read_oob_raw;
		ecc->read_oob = ecc->read_oob_raw;
		ecc->write_page_raw = marvell_nfc_hw_ecc_hmg_write_page_raw;
		ecc->write_page = marvell_nfc_hw_ecc_hmg_write_page;
		ecc->write_oob_raw = marvell_nfc_hw_ecc_hmg_write_oob_raw;
		ecc->write_oob = ecc->write_oob_raw;
	} else {
		chip->ecc.algo = NAND_ECC_ALGO_BCH;
		ecc->strength = 16;
		ecc->read_page_raw = marvell_nfc_hw_ecc_bch_read_page_raw;
		ecc->read_page = marvell_nfc_hw_ecc_bch_read_page;
		ecc->read_oob_raw = marvell_nfc_hw_ecc_bch_read_oob_raw;
		ecc->read_oob = marvell_nfc_hw_ecc_bch_read_oob;
		ecc->write_page_raw = marvell_nfc_hw_ecc_bch_write_page_raw;
		ecc->write_page = marvell_nfc_hw_ecc_bch_write_page;
		ecc->write_oob_raw = marvell_nfc_hw_ecc_bch_write_oob_raw;
		ecc->write_oob = marvell_nfc_hw_ecc_bch_write_oob;
	}

	return 0;
}

static int marvell_nand_ecc_init(struct mtd_info *mtd,
				 struct nand_ecc_ctrl *ecc)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	int ret;

	if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
	    (!ecc->size || !ecc->strength)) {
		if (requirements->step_size && requirements->strength) {
			ecc->size = requirements->step_size;
			ecc->strength = requirements->strength;
		} else {
			dev_info(nfc->dev,
				 "No minimum ECC strength, using 1b/512B\n");
			ecc->size = 512;
			ecc->strength = 1;
		}
	}

	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		ret = marvell_nand_hw_ecc_controller_init(mtd, ecc);
		if (ret)
			return ret;
		break;
	case NAND_ECC_ENGINE_TYPE_NONE:
	case NAND_ECC_ENGINE_TYPE_SOFT:
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 &&
		    mtd->writesize != SZ_2K) {
			dev_err(nfc->dev, "NFCv1 cannot write %d-byte pages\n",
				mtd->writesize);
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

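/*
 * The in-flash bad block tables are identified by the patterns below;
 * the mirror table uses the same pattern reversed so the two copies can
 * be told apart.
 */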
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		   NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,	/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		   NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,	/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};

static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr,
				       const struct nand_interface_config *conf)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	unsigned int period_ns = 1000000000 / clk_get_rate(nfc->core_clk) * 2;
	const struct nand_sdr_timings *sdr;
	struct marvell_nfc_timings nfc_tmg;
	int read_delay;

	sdr = nand_get_sdr_timings(conf);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	/*
	 * SDR timings are given in pico-seconds while NFC timings must be
	 * expressed in NAND controller clock cycles, which is half of the
	 * frequency of the accessible ECC clock retrieved by clk_get_rate().
	 * This is not written anywhere in the datasheet but was observed
	 * with an oscilloscope.
	 *
	 * The NFC datasheet gives equations from which those calculations
	 * are derived. They tend to be slightly more restrictive than the
	 * given core timings and may improve the overall speed.
	 */
	nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1;
	nfc_tmg.tRH = nfc_tmg.tRP;
	nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1;
	nfc_tmg.tWH = nfc_tmg.tWP;
	nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns);
	nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1;
	nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns);
	/*
	 * Read delay is the time of propagation from SoC pins to NFC internal
	 * logic. With non-EDO timings, this is MIN_RD_DEL_CNT clock cycles. In
	 * EDO mode, an additional delay of tRH must be taken into account so
	 * the data is sampled on the falling edge instead of the rising edge.
	 */
	read_delay = sdr->tRC_min >= 30000 ?
		MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH;

	nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns);
	/*
	 * tWHR and tRHW are supposed to be read to write delays (and vice
	 * versa) but in some cases, ie. when doing a change column, they must
	 * be greater than that to be sure tCCS delay is respected.
	 */
	nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min),
				 period_ns) - 2;
	nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min),
				 period_ns);

	/*
	 * NFCv2: Use WAIT_MODE (wait for RB line), do not rely only on delays.
	 * NFCv1: No WAIT_MODE, tR must be maximal.
	 */
	if (nfc->caps->is_nfcv2) {
		nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns);
	} else {
		nfc_tmg.tR = TO_CYCLES64(sdr->tWB_max + sdr->tR_max,
					 period_ns);
		if (nfc_tmg.tR + 3 > nfc_tmg.tCH)
			nfc_tmg.tR = nfc_tmg.tCH - 3;
		else
			nfc_tmg.tR = 0;
	}

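	/*
	 * A negative chipnr (NAND_DATA_IFACE_CHECK_ONLY) means the core only
	 * wants the timings validated, not applied to a particular die.
	 */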
	if (chipnr < 0)
		return 0;

	marvell_nand->ndtr0 =
		NDTR0_TRP(nfc_tmg.tRP) |
		NDTR0_TRH(nfc_tmg.tRH) |
		NDTR0_ETRP(nfc_tmg.tRP) |
		NDTR0_TWP(nfc_tmg.tWP) |
		NDTR0_TWH(nfc_tmg.tWH) |
		NDTR0_TCS(nfc_tmg.tCS) |
		NDTR0_TCH(nfc_tmg.tCH);

	marvell_nand->ndtr1 =
		NDTR1_TAR(nfc_tmg.tAR) |
		NDTR1_TWHR(nfc_tmg.tWHR) |
		NDTR1_TR(nfc_tmg.tR);

	if (nfc->caps->is_nfcv2) {
		marvell_nand->ndtr0 |=
			NDTR0_RD_CNT_DEL(read_delay) |
			NDTR0_SELCNTR |
			NDTR0_TADL(nfc_tmg.tADL);

		marvell_nand->ndtr1 |=
			NDTR1_TRHW(nfc_tmg.tRHW) |
			NDTR1_WAIT_MODE;
	}

	/*
	 * Reset nfc->selected_chip so the next command will cause the timing
	 * registers to be updated in marvell_nfc_select_target().
	 */
	nfc->selected_chip = NULL;

	return 0;
}

static int marvell_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(nfc->dev);
	int ret;

	if (pdata && pdata->flash_bbt)
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/* Save the chip-specific fields of NDCR */
	marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize);
	if (chip->options & NAND_BUSWIDTH_16)
		marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;

	/*
	 * On small page NANDs, only one cycle is needed to pass the
	 * column address.
	 */
	if (mtd->writesize <= 512) {
		marvell_nand->addr_cyc = 1;
	} else {
		marvell_nand->addr_cyc = 2;
		marvell_nand->ndcr |= NDCR_RA_START;
	}

	/*
	 * Now add the number of cycles needed to pass the row
	 * address.
	 *
	 * Addressing a chip using CS 2 or 3 should also need the third row
	 * cycle, but due to inconsistencies in the documentation and lack of
	 * hardware to test this situation, this case is not supported.
	 */
	if (chip->options & NAND_ROW_ADDR_3)
		marvell_nand->addr_cyc += 3;
	else
		marvell_nand->addr_cyc += 2;

	if (pdata) {
		chip->ecc.size = pdata->ecc_step_size;
		chip->ecc.strength = pdata->ecc_strength;
	}

	ret = marvell_nand_ecc_init(mtd, &chip->ecc);
	if (ret) {
		dev_err(nfc->dev, "ECC init failed: %d\n", ret);
		return ret;
	}

	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
		/*
		 * Subpage write is not available with hardware ECC; prohibit
		 * subpage read as well, since from userspace subpage access
		 * would otherwise still be allowed and a subpage write, if
		 * used, would lead to numerous uncorrectable ECC errors.
		 */
		chip->options |= NAND_NO_SUBPAGE_WRITE;
	}

	if (pdata || nfc->caps->legacy_of_bindings) {
		/*
		 * We keep the MTD name unchanged to avoid breaking platforms
		 * where the MTD cmdline parser is used and the bootloader
		 * has not been updated to use the new naming scheme.
		 */
		mtd->name = "pxa3xx_nand-0";
	} else if (!mtd->name) {
		/*
		 * If the new bindings are used and the bootloader has not been
		 * updated to pass a new mtdparts parameter on the cmdline, you
		 * should define the following property in your NAND node, ie:
		 *
		 *	label = "main-storage";
		 *
		 * This way, mtd->name will be set by the core when
		 * nand_set_flash_node() is called.
		 */
		mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
					   "%s:nand.%d", dev_name(nfc->dev),
					   marvell_nand->sels[0].cs);
		if (!mtd->name) {
			dev_err(nfc->dev, "Failed to allocate mtd->name\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static const struct nand_controller_ops marvell_nand_controller_ops = {
	.attach_chip = marvell_nand_attach_chip,
	.exec_op = marvell_nfc_exec_op,
	.setup_interface = marvell_nfc_setup_interface,
};

static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
				  struct device_node *np)
{
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(dev);
	struct marvell_nand_chip *marvell_nand;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int nsels, ret, i;
	u32 cs, rb;

	/*
	 * The legacy "num-cs" property indicates the number of CS on the only
	 * chip connected to the controller (legacy bindings do not support
	 * more than one chip). The CS and RB pins always use index #0.
	 *
	 * When not using legacy bindings, a couple of "reg" and "nand-rb"
	 * properties must be filled in. For each chip, expressed as a subnode,
	 * "reg" points to the CS lines and "nand-rb" to the RB line.
	 */
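	/*
	 * For illustration, a (hypothetical) non-legacy chip subnode could
	 * look like:
	 *
	 *	nand@0 {
	 *		reg = <0>;
	 *		nand-rb = <0>;
	 *		label = "main-storage";
	 *	};
	 */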
	if (pdata || nfc->caps->legacy_of_bindings) {
		nsels = 1;
	} else {
		nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
		if (nsels <= 0) {
			dev_err(dev, "missing/invalid reg property\n");
			return -EINVAL;
		}
	}

	/* Alloc the nand chip structure */
	marvell_nand = devm_kzalloc(dev,
				    struct_size(marvell_nand, sels, nsels),
				    GFP_KERNEL);
	if (!marvell_nand) {
		dev_err(dev, "could not allocate chip structure\n");
		return -ENOMEM;
	}

	marvell_nand->nsels = nsels;
	marvell_nand->selected_die = -1;

	for (i = 0; i < nsels; i++) {
		if (pdata || nfc->caps->legacy_of_bindings) {
			/*
			 * Legacy bindings use the CS lines in natural
			 * order (0, 1, ...)
			 */
			cs = i;
		} else {
			/* Retrieve CS id */
			ret = of_property_read_u32_index(np, "reg", i, &cs);
			if (ret) {
				dev_err(dev, "could not retrieve reg property: %d\n",
					ret);
				return ret;
			}
		}

		if (cs >= nfc->caps->max_cs_nb) {
			dev_err(dev, "invalid reg value: %u (max CS = %d)\n",
				cs, nfc->caps->max_cs_nb);
			return -EINVAL;
		}

		if (test_and_set_bit(cs, &nfc->assigned_cs)) {
			dev_err(dev, "CS %d already assigned\n", cs);
			return -EINVAL;
		}

		/*
		 * The cs variable represents the chip select id, which must be
		 * converted into bit fields for NDCB0 and NDCB2 to select the
		 * right chip. Unfortunately, due to a lack of information on
		 * the subject and incoherent documentation, the user should not
		 * use CS1 and CS3 at all as asserting them is not supported in
		 * a reliable way (due to multiplexing inside ADDR5 field).
		 */
		marvell_nand->sels[i].cs = cs;
		switch (cs) {
		case 0:
		case 2:
			marvell_nand->sels[i].ndcb0_csel = 0;
			break;
		case 1:
		case 3:
			marvell_nand->sels[i].ndcb0_csel = NDCB0_CSEL;
			break;
		default:
			return -EINVAL;
		}

		/* Retrieve RB id */
		if (pdata || nfc->caps->legacy_of_bindings) {
			/* Legacy bindings always use RB #0 */
			rb = 0;
		} else {
			ret = of_property_read_u32_index(np, "nand-rb", i,
							 &rb);
			if (ret) {
				dev_err(dev,
					"could not retrieve RB property: %d\n",
					ret);
				return ret;
			}
		}

		if (rb >= nfc->caps->max_rb_nb) {
			dev_err(dev, "invalid reg value: %u (max RB = %d)\n",
				rb, nfc->caps->max_rb_nb);
			return -EINVAL;
		}

		marvell_nand->sels[i].rb = rb;
	}

	chip = &marvell_nand->chip;
	chip->controller = &nfc->controller;
	nand_set_flash_node(chip, np);

	if (of_property_read_bool(np, "marvell,nand-keep-config"))
		chip->options |= NAND_KEEP_TIMINGS;

	mtd = nand_to_mtd(chip);
	mtd->dev.parent = dev;

	/*
	 * Default to HW ECC engine mode. If the nand-ecc-mode property is given
	 * in the DT node, this entry will be overwritten in nand_scan_ident().
	 */
	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	/*
	 * Save a reference value for timing registers before
	 * ->setup_interface() is called.
	 */
	marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0);
	marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1);

	chip->options |= NAND_BUSWIDTH_AUTO;

	ret = nand_scan(chip, marvell_nand->nsels);
	if (ret) {
		dev_err(dev, "could not scan the nand chip\n");
		return ret;
	}

	if (pdata)
		/* Legacy bindings support only one chip */
		ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
	else
		ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(dev, "failed to register mtd device: %d\n", ret);
		nand_cleanup(chip);
		return ret;
	}

	list_add_tail(&marvell_nand->node, &nfc->chips);

	return 0;
}

static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
{
	struct marvell_nand_chip *entry, *temp;
	struct nand_chip *chip;
	int ret;

	list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
		chip = &entry->chip;
		ret = mtd_device_unregister(nand_to_mtd(chip));
		WARN_ON(ret);
		nand_cleanup(chip);
		list_del(&entry->node);
	}
}

static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
{
	struct device_node *np = dev->of_node;
	struct device_node *nand_np;
	int max_cs = nfc->caps->max_cs_nb;
	int nchips;
	int ret;

	if (!np)
		nchips = 1;
	else
		nchips = of_get_child_count(np);

	if (nchips > max_cs) {
		dev_err(dev, "too many NAND chips: %d (max = %d CS)\n", nchips,
			max_cs);
		return -EINVAL;
	}

	/*
	 * Legacy bindings do not use child nodes to exhibit NAND chip
	 * properties and layout. Instead, NAND properties are mixed with the
	 * controller ones, and partitions are defined as direct subnodes of the
	 * NAND controller node.
	 */
	if (nfc->caps->legacy_of_bindings) {
		ret = marvell_nand_chip_init(dev, nfc, np);
		return ret;
	}

	for_each_child_of_node(np, nand_np) {
		ret = marvell_nand_chip_init(dev, nfc, nand_np);
		if (ret) {
			of_node_put(nand_np);
			goto cleanup_chips;
		}
	}

	return 0;

cleanup_chips:
	marvell_nand_chips_cleanup(nfc);

	return ret;
}


static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
{
	struct platform_device *pdev = container_of(nfc->dev,
						    struct platform_device,
						    dev);
	struct dma_slave_config config = {};
	struct resource *r;
	int ret;

	if (!IS_ENABLED(CONFIG_PXA_DMA)) {
		dev_warn(nfc->dev,
			 "DMA not enabled in configuration\n");
		return -ENOTSUPP;
	}

	ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	nfc->dma_chan = dma_request_chan(nfc->dev, "data");
	if (IS_ERR(nfc->dma_chan)) {
		ret = PTR_ERR(nfc->dma_chan);
		nfc->dma_chan = NULL;
		return dev_err_probe(nfc->dev, ret, "DMA channel request failed\n");
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		ret = -ENXIO;
		goto release_channel;
	}

	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_addr = r->start + NDDB;
	config.dst_addr = r->start + NDDB;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(nfc->dma_chan, &config);
	if (ret < 0) {
		dev_err(nfc->dev, "Failed to configure DMA channel\n");
		goto release_channel;
	}

	/*
	 * DMA must act on a length multiple of 32 and this length may be
	 * bigger than the destination buffer. Use this intermediate buffer
	 * for DMA transfers and then copy the desired amount of data to
	 * the buffer provided by the caller.
	 */
	nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
	if (!nfc->dma_buf) {
		ret = -ENOMEM;
		goto release_channel;
	}

	nfc->use_dma = true;

	return 0;

release_channel:
	dma_release_channel(nfc->dma_chan);
	nfc->dma_chan = NULL;

	return ret;
}


static void marvell_nfc_reset(struct marvell_nfc *nfc)
{
	/*
	 * ECC operations and interrupts are only enabled when specifically
	 * needed. ECC shall not be activated in the early stages (it would
	 * make probe fail). The arbiter flag, even if marked as "reserved",
	 * must be set (empirical). The SPARE_EN bit must always be set or
	 * the ECC bytes will not be at the same offset in the read page and
	 * the protection will fail.
	 */
	writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
		       NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
	writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
	writel_relaxed(0, nfc->regs + NDECCCTRL);
}

static int marvell_nfc_init(struct marvell_nfc *nfc)
{
	struct device_node *np = nfc->dev->of_node;

	/*
	 * Some SoCs like A7k/A8k need the NAND controller, gated clocks and
	 * reset bits to be enabled manually to avoid being bootloader
	 * dependent. This is done through the use of the System Functions
	 * registers.
	 */
	if (nfc->caps->need_system_controller) {
		struct regmap *sysctrl_base =
			syscon_regmap_lookup_by_phandle(np,
							"marvell,system-controller");

		if (IS_ERR(sysctrl_base))
			return PTR_ERR(sysctrl_base);

		regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX,
			     GENCONF_SOC_DEVICE_MUX_NFC_EN |
			     GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST |
			     GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST |
			     GENCONF_SOC_DEVICE_MUX_NFC_INT_EN);

		regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL,
				   GENCONF_CLK_GATING_CTRL_ND_GATE,
				   GENCONF_CLK_GATING_CTRL_ND_GATE);
	}

	/* Configure the DMA if appropriate */
	if (!nfc->caps->is_nfcv2)
		marvell_nfc_init_dma(nfc);

	marvell_nfc_reset(nfc);

	return 0;
}


static int marvell_nfc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct marvell_nfc *nfc;
	int ret;
	int irq;

	nfc = devm_kzalloc(&pdev->dev, sizeof(struct marvell_nfc),
			   GFP_KERNEL);
	if (!nfc)
		return -ENOMEM;

	nfc->dev = dev;
	nand_controller_init(&nfc->controller);
	nfc->controller.ops = &marvell_nand_controller_ops;
	INIT_LIST_HEAD(&nfc->chips);

	nfc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(nfc->regs))
		return PTR_ERR(nfc->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	nfc->core_clk = devm_clk_get(&pdev->dev, "core");

	/* Manage the legacy case (when the first clock was not named) */
	if (nfc->core_clk == ERR_PTR(-ENOENT))
		nfc->core_clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(nfc->core_clk))
		return PTR_ERR(nfc->core_clk);

	ret = clk_prepare_enable(nfc->core_clk);
	if (ret)
		return ret;

	nfc->reg_clk = devm_clk_get(&pdev->dev, "reg");
	if (IS_ERR(nfc->reg_clk)) {
		if (PTR_ERR(nfc->reg_clk) != -ENOENT) {
			ret = PTR_ERR(nfc->reg_clk);
			goto unprepare_core_clk;
		}

		nfc->reg_clk = NULL;
	}

	ret = clk_prepare_enable(nfc->reg_clk);
	if (ret)
		goto unprepare_core_clk;

	marvell_nfc_disable_int(nfc, NDCR_ALL_INT);
	marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
	ret = devm_request_irq(dev, irq, marvell_nfc_isr,
			       0, "marvell-nfc", nfc);
	if (ret)
		goto unprepare_reg_clk;

	/* Get NAND controller capabilities */
	if (pdev->id_entry)
		nfc->caps = (void *)pdev->id_entry->driver_data;
	else
		nfc->caps = of_device_get_match_data(&pdev->dev);

	if (!nfc->caps) {
		dev_err(dev, "Could not retrieve NFC caps\n");
		ret = -EINVAL;
		goto unprepare_reg_clk;
	}

	/* Init the controller and then probe the chips */
	ret = marvell_nfc_init(nfc);
	if (ret)
		goto unprepare_reg_clk;

	platform_set_drvdata(pdev, nfc);

	ret = marvell_nand_chips_init(dev, nfc);
	if (ret)
		goto release_dma;

	return 0;

release_dma:
	if (nfc->use_dma)
		dma_release_channel(nfc->dma_chan);
unprepare_reg_clk:
	clk_disable_unprepare(nfc->reg_clk);
unprepare_core_clk:
	clk_disable_unprepare(nfc->core_clk);

	return ret;
}

static int marvell_nfc_remove(struct platform_device *pdev)
{
	struct marvell_nfc *nfc = platform_get_drvdata(pdev);

	marvell_nand_chips_cleanup(nfc);

	if (nfc->use_dma) {
		dmaengine_terminate_all(nfc->dma_chan);
		dma_release_channel(nfc->dma_chan);
	}

	clk_disable_unprepare(nfc->reg_clk);
	clk_disable_unprepare(nfc->core_clk);

	return 0;
}

static int __maybe_unused marvell_nfc_suspend(struct device *dev)
{
	struct marvell_nfc *nfc = dev_get_drvdata(dev);
	struct marvell_nand_chip *chip;

	list_for_each_entry(chip, &nfc->chips, node)
		marvell_nfc_wait_ndrun(&chip->chip);

	clk_disable_unprepare(nfc->reg_clk);
	clk_disable_unprepare(nfc->core_clk);

	return 0;
}

static int __maybe_unused marvell_nfc_resume(struct device *dev)
{
	struct marvell_nfc *nfc = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(nfc->core_clk);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(nfc->reg_clk);
	if (ret < 0) {
		clk_disable_unprepare(nfc->core_clk);
		return ret;
	}

	/*
	 * Reset nfc->selected_chip so the next command will cause the timing
	 * registers to be restored in marvell_nfc_select_target().
	 */
	nfc->selected_chip = NULL;

	/* Reset registers that have lost their contents */
	marvell_nfc_reset(nfc);

	return 0;
}

static const struct dev_pm_ops marvell_nfc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(marvell_nfc_suspend, marvell_nfc_resume)
};

static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
	.max_cs_nb = 4,
	.max_rb_nb = 2,
	.need_system_controller = true,
	.is_nfcv2 = true,
};

static const struct marvell_nfc_caps marvell_armada370_nfc_caps = {
	.max_cs_nb = 4,
	.max_rb_nb = 2,
	.is_nfcv2 = true,
};

static const struct marvell_nfc_caps marvell_pxa3xx_nfc_caps = {
	.max_cs_nb = 2,
	.max_rb_nb = 1,
	.use_dma = true,
};

static const struct marvell_nfc_caps marvell_armada_8k_nfc_legacy_caps = {
	.max_cs_nb = 4,
	.max_rb_nb = 2,
	.need_system_controller = true,
	.legacy_of_bindings = true,
	.is_nfcv2 = true,
};

static const struct marvell_nfc_caps marvell_armada370_nfc_legacy_caps = {
	.max_cs_nb = 4,
	.max_rb_nb = 2,
	.legacy_of_bindings = true,
	.is_nfcv2 = true,
};

static const struct marvell_nfc_caps marvell_pxa3xx_nfc_legacy_caps = {
	.max_cs_nb = 2,
	.max_rb_nb = 1,
	.legacy_of_bindings = true,
	.use_dma = true,
};

static const struct platform_device_id marvell_nfc_platform_ids[] = {
	{
		.name = "pxa3xx-nand",
		.driver_data = (kernel_ulong_t)&marvell_pxa3xx_nfc_legacy_caps,
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, marvell_nfc_platform_ids);

static const struct of_device_id marvell_nfc_of_ids[] = {
	{
		.compatible = "marvell,armada-8k-nand-controller",
		.data = &marvell_armada_8k_nfc_caps,
	},
	{
		.compatible = "marvell,armada370-nand-controller",
		.data = &marvell_armada370_nfc_caps,
	},
	{
		.compatible = "marvell,pxa3xx-nand-controller",
		.data = &marvell_pxa3xx_nfc_caps,
	},
	/* Support for old/deprecated bindings: */
	{
		.compatible = "marvell,armada-8k-nand",
		.data = &marvell_armada_8k_nfc_legacy_caps,
	},
	{
		.compatible = "marvell,armada370-nand",
		.data = &marvell_armada370_nfc_legacy_caps,
	},
	{
		.compatible = "marvell,pxa3xx-nand",
		.data = &marvell_pxa3xx_nfc_legacy_caps,
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, marvell_nfc_of_ids);

static struct platform_driver marvell_nfc_driver = {
	.driver	= {
		.name		= "marvell-nfc",
		.of_match_table = marvell_nfc_of_ids,
		.pm		= &marvell_nfc_pm_ops,
	},
	.id_table = marvell_nfc_platform_ids,
	.probe = marvell_nfc_probe,
	.remove	= marvell_nfc_remove,
};
module_platform_driver(marvell_nfc_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Marvell NAND controller driver");