1// SPDX-License-Identifier: GPL-2.0 OR MIT
2/*
3 * MTK ECC controller driver.
4 * Copyright (C) 2016  MediaTek Inc.
5 * Authors:	Xiaolei Li		<xiaolei.li@mediatek.com>
6 *		Jorge Ramirez-Ortiz	<jorge.ramirez-ortiz@linaro.org>
7 */
8
9#include <linux/platform_device.h>
10#include <linux/dma-mapping.h>
11#include <linux/interrupt.h>
12#include <linux/clk.h>
13#include <linux/module.h>
14#include <linux/iopoll.h>
15#include <linux/of.h>
16#include <linux/of_platform.h>
17#include <linux/mutex.h>
18
19#include "mtk_ecc.h"
20
/* Common register bits */
#define ECC_IDLE_MASK		BIT(0)
#define ECC_IRQ_EN		BIT(0)
#define ECC_PG_IRQ_SEL		BIT(1)	/* one irq per page instead of per step */
#define ECC_OP_ENABLE		(1)
#define ECC_OP_DISABLE		(0)

/* Register offsets common to all SoCs (others come from caps->ecc_regs) */
#define ECC_ENCCON		(0x00)
#define ECC_ENCCNFG		(0x04)
#define		ECC_MS_SHIFT		(16)	/* message-size field in ENC/DECCNFG */
#define ECC_ENCDIADDR		(0x08)
#define ECC_ENCIDLE		(0x0C)
#define ECC_DECCON		(0x100)
#define ECC_DECCNFG		(0x104)
#define		DEC_EMPTY_EN		BIT(31)
#define		DEC_CNFG_CORRECT	(0x3 << 12)
#define ECC_DECIDLE		(0x10C)
#define ECC_DECENUM0		(0x114)

/* Timeout (in us) when polling for the idle bit */
#define ECC_TIMEOUT		(500000)

/* Select the encoder or decoder flavor of a register by operation */
#define ECC_IDLE_REG(op)	((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
#define ECC_CTL_REG(op)		((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
43
/*
 * Per-SoC description of the ECC controller.
 */
struct mtk_ecc_caps {
	u32 err_mask;		/* mask for one sector's error count in ECC_DECENUM */
	u32 err_shift;		/* bit width of each per-sector error-count field */
	const u8 *ecc_strength;	/* supported strengths, in ascending order */
	const u32 *ecc_regs;	/* register offsets, indexed by enum mtk_ecc_regs */
	u8 num_ecc_strength;	/* number of entries in ecc_strength[] */
	u8 ecc_mode_shift;	/* position of the mode field in ENC/DECCNFG */
	u32 parity_bits;	/* parity bits generated per unit of ECC strength */
	int pg_irq_sel;		/* 1: single irq per page in NFI mode; 0: irq per step */
};
54
/*
 * Runtime state of one ECC controller instance.
 */
struct mtk_ecc {
	struct device *dev;
	const struct mtk_ecc_caps *caps;	/* matched from the DT compatible */
	void __iomem *regs;
	struct clk *clk;

	struct completion done;	/* signalled from the irq handler */
	struct mutex lock;	/* serializes enable()..disable() sections */
	u32 sectors;		/* bitmask of sectors expected in ECC_DECDONE */

	u8 *eccdata;		/* word-aligned bounce buffer for parity bytes */
};
67
/* ecc strength that each IP supports */
static const u8 ecc_strength_mt2701[] = {
	4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
	40, 44, 48, 52, 56, 60
};

static const u8 ecc_strength_mt2712[] = {
	4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
	40, 44, 48, 52, 56, 60, 68, 72, 80
};

static const u8 ecc_strength_mt7622[] = {
	4, 6, 8, 10, 12
};
82
/* Logical register IDs; per-SoC offsets live in the *_ecc_regs[] tables */
enum mtk_ecc_regs {
	ECC_ENCPAR00,	/* first encoder parity output register */
	ECC_ENCIRQ_EN,	/* encoder interrupt enable */
	ECC_ENCIRQ_STA,	/* encoder interrupt status */
	ECC_DECDONE,	/* per-sector decode-done flags */
	ECC_DECIRQ_EN,	/* decoder interrupt enable */
	ECC_DECIRQ_STA,	/* decoder interrupt status */
};
91
92static int mt2701_ecc_regs[] = {
93	[ECC_ENCPAR00] =        0x10,
94	[ECC_ENCIRQ_EN] =       0x80,
95	[ECC_ENCIRQ_STA] =      0x84,
96	[ECC_DECDONE] =         0x124,
97	[ECC_DECIRQ_EN] =       0x200,
98	[ECC_DECIRQ_STA] =      0x204,
99};
100
101static int mt2712_ecc_regs[] = {
102	[ECC_ENCPAR00] =        0x300,
103	[ECC_ENCIRQ_EN] =       0x80,
104	[ECC_ENCIRQ_STA] =      0x84,
105	[ECC_DECDONE] =         0x124,
106	[ECC_DECIRQ_EN] =       0x200,
107	[ECC_DECIRQ_STA] =      0x204,
108};
109
110static int mt7622_ecc_regs[] = {
111	[ECC_ENCPAR00] =        0x10,
112	[ECC_ENCIRQ_EN] =       0x30,
113	[ECC_ENCIRQ_STA] =      0x34,
114	[ECC_DECDONE] =         0x11c,
115	[ECC_DECIRQ_EN] =       0x140,
116	[ECC_DECIRQ_STA] =      0x144,
117};
118
119static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
120				     enum mtk_ecc_operation op)
121{
122	struct device *dev = ecc->dev;
123	u32 val;
124	int ret;
125
126	ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val,
127					val & ECC_IDLE_MASK,
128					10, ECC_TIMEOUT);
129	if (ret)
130		dev_warn(dev, "%s NOT idle\n",
131			 op == ECC_ENCODE ? "encoder" : "decoder");
132}
133
134static irqreturn_t mtk_ecc_irq(int irq, void *id)
135{
136	struct mtk_ecc *ecc = id;
137	u32 dec, enc;
138
139	dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA])
140		    & ECC_IRQ_EN;
141	if (dec) {
142		dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
143		if (dec & ecc->sectors) {
144			/*
145			 * Clear decode IRQ status once again to ensure that
146			 * there will be no extra IRQ.
147			 */
148			readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]);
149			ecc->sectors = 0;
150			complete(&ecc->done);
151		} else {
152			return IRQ_HANDLED;
153		}
154	} else {
155		enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA])
156		      & ECC_IRQ_EN;
157		if (enc)
158			complete(&ecc->done);
159		else
160			return IRQ_NONE;
161	}
162
163	return IRQ_HANDLED;
164}
165
166static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
167{
168	u32 ecc_bit, dec_sz, enc_sz;
169	u32 reg, i;
170
171	for (i = 0; i < ecc->caps->num_ecc_strength; i++) {
172		if (ecc->caps->ecc_strength[i] == config->strength)
173			break;
174	}
175
176	if (i == ecc->caps->num_ecc_strength) {
177		dev_err(ecc->dev, "invalid ecc strength %d\n",
178			config->strength);
179		return -EINVAL;
180	}
181
182	ecc_bit = i;
183
184	if (config->op == ECC_ENCODE) {
185		/* configure ECC encoder (in bits) */
186		enc_sz = config->len << 3;
187
188		reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
189		reg |= (enc_sz << ECC_MS_SHIFT);
190		writel(reg, ecc->regs + ECC_ENCCNFG);
191
192		if (config->mode != ECC_NFI_MODE)
193			writel(lower_32_bits(config->addr),
194			       ecc->regs + ECC_ENCDIADDR);
195
196	} else {
197		/* configure ECC decoder (in bits) */
198		dec_sz = (config->len << 3) +
199			 config->strength * ecc->caps->parity_bits;
200
201		reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
202		reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
203		reg |= DEC_EMPTY_EN;
204		writel(reg, ecc->regs + ECC_DECCNFG);
205
206		if (config->sectors)
207			ecc->sectors = 1 << (config->sectors - 1);
208	}
209
210	return 0;
211}
212
213void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
214		       int sectors)
215{
216	u32 offset, i, err;
217	u32 bitflips = 0;
218
219	stats->corrected = 0;
220	stats->failed = 0;
221
222	for (i = 0; i < sectors; i++) {
223		offset = (i >> 2) << 2;
224		err = readl(ecc->regs + ECC_DECENUM0 + offset);
225		err = err >> ((i % 4) * ecc->caps->err_shift);
226		err &= ecc->caps->err_mask;
227		if (err == ecc->caps->err_mask) {
228			/* uncorrectable errors */
229			stats->failed++;
230			continue;
231		}
232
233		stats->corrected += err;
234		bitflips = max_t(u32, bitflips, err);
235	}
236
237	stats->bitflips = bitflips;
238}
239EXPORT_SYMBOL(mtk_ecc_get_stats);
240
/*
 * mtk_ecc_release - undo (of_)mtk_ecc_get(): gate the clock and drop the
 * device reference taken by of_find_device_by_node().
 */
void mtk_ecc_release(struct mtk_ecc *ecc)
{
	clk_disable_unprepare(ecc->clk);
	put_device(ecc->dev);
}
EXPORT_SYMBOL(mtk_ecc_release);
247
/* Put both engines into a known disabled state before first use. */
static void mtk_ecc_hw_init(struct mtk_ecc *ecc)
{
	mtk_ecc_wait_idle(ecc, ECC_ENCODE);
	/* NOTE(review): ENCCON is written 16-bit, DECCON 32-bit — presumably
	 * matches the register widths; confirm against the datasheet. */
	writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON);

	mtk_ecc_wait_idle(ecc, ECC_DECODE);
	writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON);
}
256
257static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
258{
259	struct platform_device *pdev;
260	struct mtk_ecc *ecc;
261
262	pdev = of_find_device_by_node(np);
263	if (!pdev)
264		return ERR_PTR(-EPROBE_DEFER);
265
266	ecc = platform_get_drvdata(pdev);
267	if (!ecc) {
268		put_device(&pdev->dev);
269		return ERR_PTR(-EPROBE_DEFER);
270	}
271
272	clk_prepare_enable(ecc->clk);
273	mtk_ecc_hw_init(ecc);
274
275	return ecc;
276}
277
278struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
279{
280	struct mtk_ecc *ecc = NULL;
281	struct device_node *np;
282
283	np = of_parse_phandle(of_node, "ecc-engine", 0);
284	if (np) {
285		ecc = mtk_ecc_get(np);
286		of_node_put(np);
287	}
288
289	return ecc;
290}
291EXPORT_SYMBOL(of_mtk_ecc_get);
292
293int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
294{
295	enum mtk_ecc_operation op = config->op;
296	u16 reg_val;
297	int ret;
298
299	ret = mutex_lock_interruptible(&ecc->lock);
300	if (ret) {
301		dev_err(ecc->dev, "interrupted when attempting to lock\n");
302		return ret;
303	}
304
305	mtk_ecc_wait_idle(ecc, op);
306
307	ret = mtk_ecc_config(ecc, config);
308	if (ret) {
309		mutex_unlock(&ecc->lock);
310		return ret;
311	}
312
313	if (config->mode != ECC_NFI_MODE || op != ECC_ENCODE) {
314		init_completion(&ecc->done);
315		reg_val = ECC_IRQ_EN;
316		/*
317		 * For ECC_NFI_MODE, if ecc->caps->pg_irq_sel is 1, then it
318		 * means this chip can only generate one ecc irq during page
319		 * read / write. If is 0, generate one ecc irq each ecc step.
320		 */
321		if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE)
322			reg_val |= ECC_PG_IRQ_SEL;
323		if (op == ECC_ENCODE)
324			writew(reg_val, ecc->regs +
325			       ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
326		else
327			writew(reg_val, ecc->regs +
328			       ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
329	}
330
331	writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
332
333	return 0;
334}
335EXPORT_SYMBOL(mtk_ecc_enable);
336
337void mtk_ecc_disable(struct mtk_ecc *ecc)
338{
339	enum mtk_ecc_operation op = ECC_ENCODE;
340
341	/* find out the running operation */
342	if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE)
343		op = ECC_DECODE;
344
345	/* disable it */
346	mtk_ecc_wait_idle(ecc, op);
347	if (op == ECC_DECODE) {
348		/*
349		 * Clear decode IRQ status in case there is a timeout to wait
350		 * decode IRQ.
351		 */
352		readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
353		writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
354	} else {
355		writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
356	}
357
358	writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
359
360	mutex_unlock(&ecc->lock);
361}
362EXPORT_SYMBOL(mtk_ecc_disable);
363
364int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op)
365{
366	int ret;
367
368	ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
369	if (!ret) {
370		dev_err(ecc->dev, "%s timeout - interrupt did not arrive)\n",
371			(op == ECC_ENCODE) ? "encoder" : "decoder");
372		return -ETIMEDOUT;
373	}
374
375	return 0;
376}
377EXPORT_SYMBOL(mtk_ecc_wait_done);
378
379int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
380		   u8 *data, u32 bytes)
381{
382	dma_addr_t addr;
383	u32 len;
384	int ret;
385
386	addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
387	ret = dma_mapping_error(ecc->dev, addr);
388	if (ret) {
389		dev_err(ecc->dev, "dma mapping error\n");
390		return -EINVAL;
391	}
392
393	config->op = ECC_ENCODE;
394	config->addr = addr;
395	ret = mtk_ecc_enable(ecc, config);
396	if (ret) {
397		dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
398		return ret;
399	}
400
401	ret = mtk_ecc_wait_done(ecc, ECC_ENCODE);
402	if (ret)
403		goto timeout;
404
405	mtk_ecc_wait_idle(ecc, ECC_ENCODE);
406
407	/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
408	len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
409
410	/* write the parity bytes generated by the ECC back to temp buffer */
411	__ioread32_copy(ecc->eccdata,
412			ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00],
413			round_up(len, 4));
414
415	/* copy into possibly unaligned OOB region with actual length */
416	memcpy(data + bytes, ecc->eccdata, len);
417timeout:
418
419	dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
420	mtk_ecc_disable(ecc);
421
422	return ret;
423}
424EXPORT_SYMBOL(mtk_ecc_encode);
425
426void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p)
427{
428	const u8 *ecc_strength = ecc->caps->ecc_strength;
429	int i;
430
431	for (i = 0; i < ecc->caps->num_ecc_strength; i++) {
432		if (*p <= ecc_strength[i]) {
433			if (!i)
434				*p = ecc_strength[i];
435			else if (*p != ecc_strength[i])
436				*p = ecc_strength[i - 1];
437			return;
438		}
439	}
440
441	*p = ecc_strength[ecc->caps->num_ecc_strength - 1];
442}
443EXPORT_SYMBOL(mtk_ecc_adjust_strength);
444
/* Number of parity bits generated per unit of ECC strength on this SoC. */
unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc)
{
	return ecc->caps->parity_bits;
}
EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
450
/* Per-SoC capability descriptions, matched via mtk_ecc_dt_match below. */
static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
	.err_mask = 0x3f,
	.err_shift = 8,
	.ecc_strength = ecc_strength_mt2701,
	.ecc_regs = mt2701_ecc_regs,
	.num_ecc_strength = 20,
	.ecc_mode_shift = 5,
	.parity_bits = 14,
	.pg_irq_sel = 0,
};

static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
	.err_mask = 0x7f,
	.err_shift = 8,
	.ecc_strength = ecc_strength_mt2712,
	.ecc_regs = mt2712_ecc_regs,
	.num_ecc_strength = 23,
	.ecc_mode_shift = 5,
	.parity_bits = 14,
	.pg_irq_sel = 1,	/* one ecc irq per page in NFI mode */
};

static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
	.err_mask = 0x1f,
	.err_shift = 5,	/* narrower per-sector error field than mt2701/mt2712 */
	.ecc_strength = ecc_strength_mt7622,
	.ecc_regs = mt7622_ecc_regs,
	.num_ecc_strength = 5,
	.ecc_mode_shift = 4,
	.parity_bits = 13,
	.pg_irq_sel = 0,
};
483
/* Devicetree match table: compatible string -> capability description. */
static const struct of_device_id mtk_ecc_dt_match[] = {
	{
		.compatible = "mediatek,mt2701-ecc",
		.data = &mtk_ecc_caps_mt2701,
	}, {
		.compatible = "mediatek,mt2712-ecc",
		.data = &mtk_ecc_caps_mt2712,
	}, {
		.compatible = "mediatek,mt7622-ecc",
		.data = &mtk_ecc_caps_mt7622,
	},
	{},
};
497
498static int mtk_ecc_probe(struct platform_device *pdev)
499{
500	struct device *dev = &pdev->dev;
501	struct mtk_ecc *ecc;
502	struct resource *res;
503	u32 max_eccdata_size;
504	int irq, ret;
505
506	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
507	if (!ecc)
508		return -ENOMEM;
509
510	ecc->caps = of_device_get_match_data(dev);
511
512	max_eccdata_size = ecc->caps->num_ecc_strength - 1;
513	max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];
514	max_eccdata_size = (max_eccdata_size * ecc->caps->parity_bits + 7) >> 3;
515	max_eccdata_size = round_up(max_eccdata_size, 4);
516	ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL);
517	if (!ecc->eccdata)
518		return -ENOMEM;
519
520	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
521	ecc->regs = devm_ioremap_resource(dev, res);
522	if (IS_ERR(ecc->regs)) {
523		dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs));
524		return PTR_ERR(ecc->regs);
525	}
526
527	ecc->clk = devm_clk_get(dev, NULL);
528	if (IS_ERR(ecc->clk)) {
529		dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
530		return PTR_ERR(ecc->clk);
531	}
532
533	irq = platform_get_irq(pdev, 0);
534	if (irq < 0)
535		return irq;
536
537	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
538	if (ret) {
539		dev_err(dev, "failed to set DMA mask\n");
540		return ret;
541	}
542
543	ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
544	if (ret) {
545		dev_err(dev, "failed to request irq\n");
546		return -EINVAL;
547	}
548
549	ecc->dev = dev;
550	mutex_init(&ecc->lock);
551	platform_set_drvdata(pdev, ecc);
552	dev_info(dev, "probed\n");
553
554	return 0;
555}
556
557#ifdef CONFIG_PM_SLEEP
/* System-sleep hook: gate the controller clock. */
static int mtk_ecc_suspend(struct device *dev)
{
	struct mtk_ecc *ecc = dev_get_drvdata(dev);

	clk_disable_unprepare(ecc->clk);

	return 0;
}
566
567static int mtk_ecc_resume(struct device *dev)
568{
569	struct mtk_ecc *ecc = dev_get_drvdata(dev);
570	int ret;
571
572	ret = clk_prepare_enable(ecc->clk);
573	if (ret) {
574		dev_err(dev, "failed to enable clk\n");
575		return ret;
576	}
577
578	return 0;
579}
580
/* Standard sleep ops: gate/ungate the ECC clock across suspend/resume. */
static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
#endif

MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);

/* Platform driver glue; binding is DT-only via mtk_ecc_dt_match. */
static struct platform_driver mtk_ecc_driver = {
	.probe  = mtk_ecc_probe,
	.driver = {
		.name  = "mtk-ecc",
		.of_match_table = of_match_ptr(mtk_ecc_dt_match),
#ifdef CONFIG_PM_SLEEP
		.pm = &mtk_ecc_pm_ops,
#endif
	},
};

module_platform_driver(mtk_ecc_driver);

MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
MODULE_DESCRIPTION("MTK Nand ECC Driver");
MODULE_LICENSE("Dual MIT/GPL");
602