/*
 * Synopsys DDR ECC Driver
 * This driver is based on the ppc4xx_edac.c driver
 *
 * Copyright (C) 2012 - 2014 Xilinx, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/edac.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "edac_module.h"

/* Number of cs_rows needed per memory controller */
#define SYNPS_EDAC_NR_CSROWS		1

/* Number of channels per memory controller */
#define SYNPS_EDAC_NR_CHANS		1

/* Granularity of reported error in bytes */
#define SYNPS_EDAC_ERR_GRAIN		1

#define SYNPS_EDAC_MSG_SIZE		256

#define SYNPS_EDAC_MOD_STRING		"synps_edac"
#define SYNPS_EDAC_MOD_VER		"1"

/* Synopsys DDR memory controller registers that are relevant to ECC */
#define CTRL_OFST			0x0
#define T_ZQ_OFST			0xA4

/* ECC control register */
#define ECC_CTRL_OFST			0xC4
/* ECC log register */
#define CE_LOG_OFST			0xC8
/* ECC address register */
#define CE_ADDR_OFST			0xCC
/* ECC data[31:0] register */
#define CE_DATA_31_0_OFST		0xD0

/* Uncorrectable error info registers */
#define UE_LOG_OFST			0xDC
#define UE_ADDR_OFST			0xE0
#define UE_DATA_31_0_OFST		0xE4

#define STAT_OFST			0xF0
#define SCRUB_OFST			0xF4

/* Control register bit field definitions */
#define CTRL_BW_MASK			0xC
#define CTRL_BW_SHIFT			2

#define DDRCTL_WDTH_16			1
#define DDRCTL_WDTH_32			0

/* ZQ register bit field definitions */
#define T_ZQ_DDRMODE_MASK		0x2

/* ECC control register bit field definitions */
#define ECC_CTRL_CLR_CE_ERR		0x2
#define ECC_CTRL_CLR_UE_ERR		0x1

/* ECC correctable/uncorrectable error log register definitions */
#define LOG_VALID			0x1
#define CE_LOG_BITPOS_MASK		0xFE
#define CE_LOG_BITPOS_SHIFT		1

/* ECC correctable/uncorrectable error address register definitions */
#define ADDR_COL_MASK			0xFFF
#define ADDR_ROW_MASK			0xFFFF000
#define ADDR_ROW_SHIFT			12
#define ADDR_BANK_MASK			0x70000000
#define ADDR_BANK_SHIFT			28

/* ECC statistic register definitions */
#define STAT_UECNT_MASK			0xFF
#define STAT_CECNT_MASK			0xFF00
#define STAT_CECNT_SHIFT		8

/* ECC scrub register definitions */
#define SCRUB_MODE_MASK			0x7
#define SCRUB_MODE_SECDED		0x4

/* DDR ECC Quirks */
#define DDR_ECC_INTR_SUPPORT		BIT(0)
#define DDR_ECC_DATA_POISON_SUPPORT	BIT(1)

/* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
/* ECC Configuration Registers */
#define ECC_CFG0_OFST			0x70
#define ECC_CFG1_OFST			0x74

/* ECC Status Register */
#define ECC_STAT_OFST			0x78

/* ECC Clear Register */
#define ECC_CLR_OFST			0x7C

/* ECC Error count Register */
#define ECC_ERRCNT_OFST			0x80

/* ECC Corrected Error Address Register */
#define ECC_CEADDR0_OFST		0x84
#define ECC_CEADDR1_OFST		0x88

/* ECC Syndrome Registers */
#define ECC_CSYND0_OFST			0x8C
#define ECC_CSYND1_OFST			0x90
#define ECC_CSYND2_OFST			0x94

/* ECC Bit Mask0 Address Register */
#define ECC_BITMASK0_OFST		0x98
#define ECC_BITMASK1_OFST		0x9C
#define ECC_BITMASK2_OFST		0xA0

/* ECC UnCorrected Error Address Register */
#define ECC_UEADDR0_OFST		0xA4
#define ECC_UEADDR1_OFST		0xA8

/* ECC Syndrome Registers */
#define ECC_UESYND0_OFST		0xAC
#define ECC_UESYND1_OFST		0xB0
#define ECC_UESYND2_OFST		0xB4

/* ECC Poison Address Reg */
#define ECC_POISON0_OFST		0xB8
#define ECC_POISON1_OFST		0xBC

#define ECC_ADDRMAP0_OFFSET		0x200

/* Control register bitfield definitions */
#define ECC_CTRL_BUSWIDTH_MASK		0x3000
#define ECC_CTRL_BUSWIDTH_SHIFT		12
#define ECC_CTRL_CLR_CE_ERRCNT		BIT(2)
#define ECC_CTRL_CLR_UE_ERRCNT		BIT(3)

/* DDR Control Register width definitions */
#define DDRCTL_EWDTH_16			2
#define DDRCTL_EWDTH_32			1
#define DDRCTL_EWDTH_64			0

/* ECC status register definitions */
#define ECC_STAT_UECNT_MASK		0xF0000
#define ECC_STAT_UECNT_SHIFT		16
#define ECC_STAT_CECNT_MASK		0xF00
#define ECC_STAT_CECNT_SHIFT		8
#define ECC_STAT_BITNUM_MASK		0x7F

/* ECC error count register definitions */
#define ECC_ERRCNT_UECNT_MASK		0xFFFF0000
#define ECC_ERRCNT_UECNT_SHIFT		16
#define ECC_ERRCNT_CECNT_MASK		0xFFFF

/* DDR QOS Interrupt register definitions */
#define DDR_QOS_IRQ_STAT_OFST		0x20200
#define DDR_QOSUE_MASK			0x4
#define DDR_QOSCE_MASK			0x2
#define ECC_CE_UE_INTR_MASK		0x6
#define DDR_QOS_IRQ_EN_OFST		0x20208
#define DDR_QOS_IRQ_DB_OFST		0x2020C

/* ECC Corrected Error Register Mask and Shifts */
#define ECC_CEADDR0_RW_MASK		0x3FFFF
#define ECC_CEADDR0_RNK_MASK		BIT(24)
#define ECC_CEADDR1_BNKGRP_MASK		0x3000000
#define ECC_CEADDR1_BNKNR_MASK		0x70000
#define ECC_CEADDR1_BLKNR_MASK		0xFFF
#define ECC_CEADDR1_BNKGRP_SHIFT	24
#define ECC_CEADDR1_BNKNR_SHIFT		16

/* ECC Poison register shifts */
#define ECC_POISON0_RANK_SHIFT		24
#define ECC_POISON0_RANK_MASK		BIT(24)
#define ECC_POISON0_COLUMN_SHIFT	0
#define ECC_POISON0_COLUMN_MASK		0xFFF
#define ECC_POISON1_BG_SHIFT		28
#define ECC_POISON1_BG_MASK		0x30000000
#define ECC_POISON1_BANKNR_SHIFT	24
#define ECC_POISON1_BANKNR_MASK		0x7000000
#define ECC_POISON1_ROW_SHIFT		0
#define ECC_POISON1_ROW_MASK		0x3FFFF

/* DDR Memory type defines */
#define MEM_TYPE_DDR3			0x1
#define MEM_TYPE_LPDDR3			0x8
#define MEM_TYPE_DDR2			0x4
#define MEM_TYPE_DDR4			0x10
#define MEM_TYPE_LPDDR4			0x20

/* DDRC Software control register */
#define DDRC_SWCTL			0x320

/* DDRC ECC CE & UE poison mask */
#define ECC_CEPOISON_MASK		0x3
#define ECC_UEPOISON_MASK		0x1

/* DDRC Device config masks */
#define DDRC_MSTR_CFG_MASK		0xC0000000
#define DDRC_MSTR_CFG_SHIFT		30
#define DDRC_MSTR_CFG_X4_MASK		0x0
#define DDRC_MSTR_CFG_X8_MASK		0x1
#define DDRC_MSTR_CFG_X16_MASK		0x2
#define DDRC_MSTR_CFG_X32_MASK		0x3

#define DDR_MAX_ROW_SHIFT		18
#define DDR_MAX_COL_SHIFT		14
#define DDR_MAX_BANK_SHIFT		3
#define DDR_MAX_BANKGRP_SHIFT		2

#define ROW_MAX_VAL_MASK		0xF
#define COL_MAX_VAL_MASK		0xF
#define BANK_MAX_VAL_MASK		0x1F
#define BANKGRP_MAX_VAL_MASK		0x1F
#define RANK_MAX_VAL_MASK		0x1F

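/*
 * Fixed HIF address bit bases that the controller adds to the ADDRMAP
 * register fields when decoding row/column/bank/bank-group/rank bits.
 * These are consumed by the setup_*_address_map() helpers below and are
 * assumed to follow the ZynqMP DDRC ADDRMAP internal base values.
 */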
#define ROW_B0_BASE			6
#define ROW_B1_BASE			7
#define ROW_B2_BASE			8
#define ROW_B3_BASE			9
#define ROW_B4_BASE			10
#define ROW_B5_BASE			11
#define ROW_B6_BASE			12
#define ROW_B7_BASE			13
#define ROW_B8_BASE			14
#define ROW_B9_BASE			15
#define ROW_B10_BASE			16
#define ROW_B11_BASE			17
#define ROW_B12_BASE			18
#define ROW_B13_BASE			19
#define ROW_B14_BASE			20
#define ROW_B15_BASE			21
#define ROW_B16_BASE			22
#define ROW_B17_BASE			23

#define COL_B2_BASE			2
#define COL_B3_BASE			3
#define COL_B4_BASE			4
#define COL_B5_BASE			5
#define COL_B6_BASE			6
#define COL_B7_BASE			7
#define COL_B8_BASE			8
#define COL_B9_BASE			9
#define COL_B10_BASE			10
#define COL_B11_BASE			11
#define COL_B12_BASE			12
#define COL_B13_BASE			13

#define BANK_B0_BASE			2
#define BANK_B1_BASE			3
#define BANK_B2_BASE			4

#define BANKGRP_B0_BASE			2
#define BANKGRP_B1_BASE			3

#define RANK_B0_BASE			6

/**
 * struct ecc_error_info - ECC error log information.
 * @row:	Row number.
 * @col:	Column number.
 * @bank:	Bank number.
 * @bitpos:	Bit position.
 * @data:	Data causing the error.
 * @bankgrpnr:	Bank group number.
 * @blknr:	Block number.
 */
struct ecc_error_info {
	u32 row;
	u32 col;
	u32 bank;
	u32 bitpos;
	u32 data;
	u32 bankgrpnr;
	u32 blknr;
};

/**
 * struct synps_ecc_status - ECC status information to report.
 * @ce_cnt:	Correctable error count.
 * @ue_cnt:	Uncorrectable error count.
 * @ceinfo:	Correctable error log information.
 * @ueinfo:	Uncorrectable error log information.
 */
struct synps_ecc_status {
	u32 ce_cnt;
	u32 ue_cnt;
	struct ecc_error_info ceinfo;
	struct ecc_error_info ueinfo;
};

/**
 * struct synps_edac_priv - DDR memory controller private instance data.
 * @baseaddr:		Base address of the DDR controller.
 * @message:		Buffer for framing the event specific info.
 * @stat:		ECC status information.
 * @p_data:		Platform data.
 * @ce_cnt:		Correctable Error count.
 * @ue_cnt:		Uncorrectable Error count.
 * @poison_addr:	Data poison address.
 * @row_shift:		Bit shifts for row bit.
 * @col_shift:		Bit shifts for column bit.
 * @bank_shift:		Bit shifts for bank bit.
 * @bankgrp_shift:	Bit shifts for bank group bit.
 * @rank_shift:		Bit shifts for rank bit.
 */
struct synps_edac_priv {
	void __iomem *baseaddr;
	char message[SYNPS_EDAC_MSG_SIZE];
	struct synps_ecc_status stat;
	const struct synps_platform_data *p_data;
	u32 ce_cnt;
	u32 ue_cnt;
#ifdef CONFIG_EDAC_DEBUG
	ulong poison_addr;
	u32 row_shift[18];
	u32 col_shift[14];
	u32 bank_shift[3];
	u32 bankgrp_shift[2];
	u32 rank_shift[1];
#endif
};

/**
 * struct synps_platform_data - synps platform data structure.
 * @get_error_info:	Get EDAC error info.
 * @get_mtype:		Get mtype.
 * @get_dtype:		Get dtype.
 * @get_ecc_state:	Get ECC state.
 * @quirks:		To differentiate IPs.
 */
struct synps_platform_data {
	int (*get_error_info)(struct synps_edac_priv *priv);
	enum mem_type (*get_mtype)(const void __iomem *base);
	enum dev_type (*get_dtype)(const void __iomem *base);
	bool (*get_ecc_state)(void __iomem *base);
	int quirks;
};

/**
 * zynq_get_error_info - Get the current ECC error info.
 * @priv:	DDR memory controller private instance data.
 *
 * Return: one if there is no error, otherwise zero.
 */
static int zynq_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval = 0;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	regval = readl(base + STAT_OFST);
	if (!regval)
		return 1;

	p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
	p->ue_cnt = regval & STAT_UECNT_MASK;

	regval = readl(base + CE_LOG_OFST);
	if (!(p->ce_cnt && (regval & LOG_VALID)))
		goto ue_err;

	p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
	regval = readl(base + CE_ADDR_OFST);
	p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ceinfo.col = regval & ADDR_COL_MASK;
	p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
	edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
		 p->ceinfo.data);
	clearval = ECC_CTRL_CLR_CE_ERR;

ue_err:
	regval = readl(base + UE_LOG_OFST);
	if (!(p->ue_cnt && (regval & LOG_VALID)))
		goto out;

	regval = readl(base + UE_ADDR_OFST);
	p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ueinfo.col = regval & ADDR_COL_MASK;
	p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
	clearval |= ECC_CTRL_CLR_UE_ERR;

out:
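	/*
	 * Write the requested clear bits to reset the captured error logs,
	 * then write zero so new errors can be latched again (the clear bits
	 * are assumed not to be self-clearing).
	 */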
	writel(clearval, base + ECC_CTRL_OFST);
	writel(0x0, base + ECC_CTRL_OFST);

	return 0;
}

/**
 * zynqmp_get_error_info - Get the current ECC error info.
 * @priv:	DDR memory controller private instance data.
 *
 * Return: one if there is no error, otherwise zero.
 */
static int zynqmp_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval = 0;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	regval = readl(base + ECC_ERRCNT_OFST);
	p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
	p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
	if (!p->ce_cnt)
		goto ue_err;

	regval = readl(base + ECC_STAT_OFST);
	if (!regval)
		return 1;

	p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);

	regval = readl(base + ECC_CEADDR0_OFST);
	p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_CEADDR1_OFST);
	p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ceinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
	edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n",
		 readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST),
		 readl(base + ECC_CSYND2_OFST));
ue_err:
	if (!p->ue_cnt)
		goto out;

	regval = readl(base + ECC_UEADDR0_OFST);
	p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_UEADDR1_OFST);
	p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
out:
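	/*
	 * Clear both the error logs and the error counters, then write zero
	 * to re-arm capture (assumed to mirror the Zynq clear sequence above).
	 */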
	clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT;
	clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
	writel(clearval, base + ECC_CLR_OFST);
	writel(0x0, base + ECC_CLR_OFST);

	return 0;
}

/**
 * handle_error - Handle Correctable and Uncorrectable errors.
 * @mci:	EDAC memory controller instance.
 * @p:		Synopsys ECC status structure.
 *
 * Handles ECC correctable and uncorrectable errors.
 */
static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
{
	struct synps_edac_priv *priv = mci->pvt_info;
	struct ecc_error_info *pinf;

	if (p->ce_cnt) {
		pinf = &p->ceinfo;
		if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
				 "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
				 "CE", pinf->row, pinf->bank,
				 pinf->bankgrpnr, pinf->blknr,
				 pinf->bitpos, pinf->data);
		} else {
			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
				 "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
				 "CE", pinf->row, pinf->bank, pinf->col,
				 pinf->bitpos, pinf->data);
		}

		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
				     p->ce_cnt, 0, 0, 0, 0, 0, -1,
				     priv->message, "");
	}

	if (p->ue_cnt) {
		pinf = &p->ueinfo;
		if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
				 "DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d",
				 "UE", pinf->row, pinf->bank,
				 pinf->bankgrpnr, pinf->blknr);
		} else {
			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
				 "DDR ECC error type :%s Row %d Bank %d Col %d ",
				 "UE", pinf->row, pinf->bank, pinf->col);
		}

		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
				     p->ue_cnt, 0, 0, 0, 0, 0, -1,
				     priv->message, "");
	}

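	/* Reset the snapshot so stale counts are not reported again. */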
	memset(p, 0, sizeof(*p));
}

/**
 * intr_handler - Interrupt Handler for ECC interrupts.
 * @irq:        IRQ number.
 * @dev_id:     Device ID.
 *
 * Return: IRQ_NONE if the interrupt was not caused by an ECC error,
 * otherwise IRQ_HANDLED.
 */
static irqreturn_t intr_handler(int irq, void *dev_id)
{
	const struct synps_platform_data *p_data;
	struct mem_ctl_info *mci = dev_id;
	struct synps_edac_priv *priv;
	int status, regval;

	priv = mci->pvt_info;
	p_data = priv->p_data;

	regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
	regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
	if (!(regval & ECC_CE_UE_INTR_MASK))
		return IRQ_NONE;

	status = p_data->get_error_info(priv);
	if (status)
		return IRQ_NONE;

	priv->ce_cnt += priv->stat.ce_cnt;
	priv->ue_cnt += priv->stat.ue_cnt;
	handle_error(mci, &priv->stat);

	edac_dbg(3, "Total error count CE %d UE %d\n",
		 priv->ce_cnt, priv->ue_cnt);
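	/* Acknowledge the serviced CE/UE bits (assumed write-one-to-clear). */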
	writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
	return IRQ_HANDLED;
}

/**
 * check_errors - Check controller for ECC errors.
 * @mci:	EDAC memory controller instance.
 *
 * Check and post ECC errors. Called by the polling thread.
 */
static void check_errors(struct mem_ctl_info *mci)
{
	const struct synps_platform_data *p_data;
	struct synps_edac_priv *priv;
	int status;

	priv = mci->pvt_info;
	p_data = priv->p_data;

	status = p_data->get_error_info(priv);
	if (status)
		return;

	priv->ce_cnt += priv->stat.ce_cnt;
	priv->ue_cnt += priv->stat.ue_cnt;
	handle_error(mci, &priv->stat);

	edac_dbg(3, "Total error count CE %d UE %d\n",
		 priv->ce_cnt, priv->ue_cnt);
}

/**
 * zynq_get_dtype - Return the controller memory width.
 * @base:	DDR memory controller base address.
 *
 * Get the EDAC device type width appropriate for the current controller
 * configuration.
 *
 * Return: a device type width enumeration.
 */
static enum dev_type zynq_get_dtype(const void __iomem *base)
{
	enum dev_type dt;
	u32 width;

	width = readl(base + CTRL_OFST);
	width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT;

	switch (width) {
	case DDRCTL_WDTH_16:
		dt = DEV_X2;
		break;
	case DDRCTL_WDTH_32:
		dt = DEV_X4;
		break;
	default:
		dt = DEV_UNKNOWN;
	}

	return dt;
}

/**
 * zynqmp_get_dtype - Return the controller memory width.
 * @base:	DDR memory controller base address.
 *
 * Get the EDAC device type width appropriate for the current controller
 * configuration.
 *
 * Return: a device type width enumeration.
 */
static enum dev_type zynqmp_get_dtype(const void __iomem *base)
{
	enum dev_type dt;
	u32 width;

	width = readl(base + CTRL_OFST);
	width = (width & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
	switch (width) {
	case DDRCTL_EWDTH_16:
		dt = DEV_X2;
		break;
	case DDRCTL_EWDTH_32:
		dt = DEV_X4;
		break;
	case DDRCTL_EWDTH_64:
		dt = DEV_X8;
		break;
	default:
		dt = DEV_UNKNOWN;
	}

	return dt;
}

/**
 * zynq_get_ecc_state - Return the controller ECC enable/disable status.
 * @base:	DDR memory controller base address.
 *
 * Get the ECC enable/disable status of the controller.
 *
 * Return: true if enabled, otherwise false.
 */
static bool zynq_get_ecc_state(void __iomem *base)
{
	enum dev_type dt;
	u32 ecctype;

	dt = zynq_get_dtype(base);
	if (dt == DEV_UNKNOWN)
		return false;

	ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
	if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
		return true;

	return false;
}

/**
 * zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
 * @base:	DDR memory controller base address.
 *
 * Get the ECC enable/disable status for the controller.
 *
 * Return: true if ECC is enabled, otherwise false.
 */
static bool zynqmp_get_ecc_state(void __iomem *base)
{
	enum dev_type dt;
	u32 ecctype;

	dt = zynqmp_get_dtype(base);
	if (dt == DEV_UNKNOWN)
		return false;

	ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
	if ((ecctype == SCRUB_MODE_SECDED) &&
	    ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
		return true;

	return false;
}

/**
 * get_memsize - Read the size of the attached memory device.
 *
 * Return: the memory size in bytes.
 */
static u32 get_memsize(void)
{
	struct sysinfo inf;

	si_meminfo(&inf);

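	/*
	 * No memory-size register is decoded here, so the total system RAM is
	 * used as an approximation of the attached memory size.
	 */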
	return inf.totalram * inf.mem_unit;
}

/**
 * zynq_get_mtype - Return the controller memory type.
 * @base:	DDR memory controller base address.
 *
 * Get the EDAC memory type appropriate for the current controller
 * configuration.
 *
 * Return: a memory type enumeration.
 */
static enum mem_type zynq_get_mtype(const void __iomem *base)
{
	enum mem_type mt;
	u32 memtype;

	memtype = readl(base + T_ZQ_OFST);

	if (memtype & T_ZQ_DDRMODE_MASK)
		mt = MEM_DDR3;
	else
		mt = MEM_DDR2;

	return mt;
}

/**
 * zynqmp_get_mtype - Return the controller memory type.
 * @base:	DDR memory controller base address.
 *
 * Get the EDAC memory type appropriate for the current controller
 * configuration.
 *
 * Return: a memory type enumeration.
 */
static enum mem_type zynqmp_get_mtype(const void __iomem *base)
{
	enum mem_type mt;
	u32 memtype;

	memtype = readl(base + CTRL_OFST);

	if ((memtype & MEM_TYPE_DDR3) || (memtype & MEM_TYPE_LPDDR3))
		mt = MEM_DDR3;
	else if (memtype & MEM_TYPE_DDR2)
		mt = MEM_RDDR2;
	else if ((memtype & MEM_TYPE_LPDDR4) || (memtype & MEM_TYPE_DDR4))
		mt = MEM_DDR4;
	else
		mt = MEM_EMPTY;

	return mt;
}

/**
 * init_csrows - Initialize the csrow data.
 * @mci:	EDAC memory controller instance.
 *
 * Initialize the chip select rows associated with the EDAC memory
 * controller instance.
 */
static void init_csrows(struct mem_ctl_info *mci)
{
	struct synps_edac_priv *priv = mci->pvt_info;
	const struct synps_platform_data *p_data;
	struct csrow_info *csi;
	struct dimm_info *dimm;
	u32 size, row;
	int j;

	p_data = priv->p_data;

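	/*
	 * With a single virtual csrow, the whole memory size is attributed to
	 * that csrow and split evenly across its channels.
	 */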
	for (row = 0; row < mci->nr_csrows; row++) {
		csi = mci->csrows[row];
		size = get_memsize();

		for (j = 0; j < csi->nr_channels; j++) {
			dimm		= csi->channels[j]->dimm;
			dimm->edac_mode	= EDAC_SECDED;
			dimm->mtype	= p_data->get_mtype(priv->baseaddr);
			dimm->nr_pages	= (size >> PAGE_SHIFT) / csi->nr_channels;
			dimm->grain	= SYNPS_EDAC_ERR_GRAIN;
			dimm->dtype	= p_data->get_dtype(priv->baseaddr);
		}
	}
}

/**
 * mc_init - Initialize one driver instance.
 * @mci:	EDAC memory controller instance.
 * @pdev:	platform device.
 *
 * Perform initialization of the EDAC memory controller instance and
 * related driver-private data associated with the memory controller the
 * instance is bound to.
 */
static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
{
	struct synps_edac_priv *priv;

	mci->pdev = &pdev->dev;
	priv = mci->pvt_info;
	platform_set_drvdata(pdev, mci);

	/* Initialize controller capabilities and configuration */
	mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->scrub_cap = SCRUB_HW_SRC;
	mci->scrub_mode = SCRUB_NONE;

	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->ctl_name = "synps_ddr_controller";
	mci->dev_name = SYNPS_EDAC_MOD_STRING;
	mci->mod_name = SYNPS_EDAC_MOD_VER;

	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
		edac_op_state = EDAC_OPSTATE_INT;
	} else {
		edac_op_state = EDAC_OPSTATE_POLL;
		mci->edac_check = check_errors;
	}

	mci->ctl_page_to_phys = NULL;

	init_csrows(mci);
}

static void enable_intr(struct synps_edac_priv *priv)
{
	/* Enable UE/CE Interrupts */
	writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
			priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
}

static void disable_intr(struct synps_edac_priv *priv)
{
	/* Disable UE/CE Interrupts */
	writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
			priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
}

static int setup_irq(struct mem_ctl_info *mci,
		     struct platform_device *pdev)
{
	struct synps_edac_priv *priv = mci->pvt_info;
	int ret, irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		edac_printk(KERN_ERR, EDAC_MC,
			    "No IRQ %d in DT\n", irq);
		return irq;
	}

	ret = devm_request_irq(&pdev->dev, irq, intr_handler,
			       0, dev_name(&pdev->dev), mci);
	if (ret < 0) {
		edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
		return ret;
	}

	enable_intr(priv);

	return 0;
}

static const struct synps_platform_data zynq_edac_def = {
	.get_error_info	= zynq_get_error_info,
	.get_mtype	= zynq_get_mtype,
	.get_dtype	= zynq_get_dtype,
	.get_ecc_state	= zynq_get_ecc_state,
	.quirks		= 0,
};

static const struct synps_platform_data zynqmp_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
	.quirks         = (DDR_ECC_INTR_SUPPORT
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};

static const struct of_device_id synps_edac_match[] = {
	{
		.compatible = "xlnx,zynq-ddrc-a05",
		.data = (void *)&zynq_edac_def
	},
	{
		.compatible = "xlnx,zynqmp-ddrc-2.40a",
		.data = (void *)&zynqmp_edac_def
	},
	{
		/* end of table */
	}
};

MODULE_DEVICE_TABLE(of, synps_edac_match);

#ifdef CONFIG_EDAC_DEBUG
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

/**
 * ddr_poison_setup -	Update poison registers.
 * @priv:		DDR memory controller private instance data.
 *
 * Update poison registers as per DDR mapping.
 * Return: none.
 */
static void ddr_poison_setup(struct synps_edac_priv *priv)
{
	int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
	int index;
	ulong hif_addr = 0;

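	/*
	 * Convert the system address to a HIF address; the shift by 3 assumes
	 * the controller's HIF bus is 8 bytes wide.
	 */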
	hif_addr = priv->poison_addr >> 3;

	for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
		if (priv->row_shift[index])
			row |= (((hif_addr >> priv->row_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	for (index = 0; index < DDR_MAX_COL_SHIFT; index++) {
		if (priv->col_shift[index] || index < 3)
			col |= (((hif_addr >> priv->col_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) {
		if (priv->bank_shift[index])
			bank |= (((hif_addr >> priv->bank_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) {
		if (priv->bankgrp_shift[index])
			bankgrp |= (((hif_addr >> priv->bankgrp_shift[index])
						& BIT(0)) << index);
		else
			break;
	}

	if (priv->rank_shift[0])
		rank = (hif_addr >> priv->rank_shift[0]) & BIT(0);

	regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK;
	regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK;
	writel(regval, priv->baseaddr + ECC_POISON0_OFST);

	regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK;
	regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK;
	regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK;
	writel(regval, priv->baseaddr + ECC_POISON1_OFST);
}

static ssize_t inject_data_error_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct synps_edac_priv *priv = mci->pvt_info;

	return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r"
			"Error injection Address: 0x%lx\n\r",
			readl(priv->baseaddr + ECC_POISON0_OFST),
			readl(priv->baseaddr + ECC_POISON1_OFST),
			priv->poison_addr);
}

static ssize_t inject_data_error_store(struct device *dev,
				       struct device_attribute *mattr,
				       const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct synps_edac_priv *priv = mci->pvt_info;

	if (kstrtoul(data, 0, &priv->poison_addr))
		return -EINVAL;

	ddr_poison_setup(priv);

	return count;
}

static ssize_t inject_data_poison_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct synps_edac_priv *priv = mci->pvt_info;

	return sprintf(data, "Data Poisoning: %s\n\r",
			(((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3)
			? ("Correctable Error") : ("UnCorrectable Error"));
}

static ssize_t inject_data_poison_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct synps_edac_priv *priv = mci->pvt_info;

	writel(0, priv->baseaddr + DDRC_SWCTL);
	if (strncmp(data, "CE", 2) == 0)
		writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
	else
		writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
	writel(1, priv->baseaddr + DDRC_SWCTL);

	return count;
}

static DEVICE_ATTR_RW(inject_data_error);
static DEVICE_ATTR_RW(inject_data_poison);
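
/*
 * Typical error-injection flow via these attributes (paths assume this
 * instance is registered as mc0; adjust to the actual EDAC instance):
 *
 *   echo 0x<phys_addr> > /sys/devices/system/edac/mc/mc0/inject_data_error
 *   echo CE > /sys/devices/system/edac/mc/mc0/inject_data_poison
 *
 * After programming the poison address and type, writing and then reading
 * back the poisoned location should produce a CE (or UE) report from EDAC.
 */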

static int edac_create_sysfs_attributes(struct mem_ctl_info *mci)
{
	int rc;

	rc = device_create_file(&mci->dev, &dev_attr_inject_data_error);
	if (rc < 0)
		return rc;
	rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison);
	if (rc < 0)
		return rc;
	return 0;
}

static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci)
{
	device_remove_file(&mci->dev, &dev_attr_inject_data_error);
	device_remove_file(&mci->dev, &dev_attr_inject_data_poison);
}

static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 addrmap_row_b2_10;
	int index;

	priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE;
	priv->row_shift[1] = ((addrmap[5] >> 8) &
			ROW_MAX_VAL_MASK) + ROW_B1_BASE;

	addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK;
	if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) {
		for (index = 2; index < 11; index++)
			priv->row_shift[index] = addrmap_row_b2_10 +
				index + ROW_B0_BASE;

	} else {
		priv->row_shift[2] = (addrmap[9] &
				ROW_MAX_VAL_MASK) + ROW_B2_BASE;
		priv->row_shift[3] = ((addrmap[9] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B3_BASE;
		priv->row_shift[4] = ((addrmap[9] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B4_BASE;
		priv->row_shift[5] = ((addrmap[9] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B5_BASE;
		priv->row_shift[6] = (addrmap[10] &
				ROW_MAX_VAL_MASK) + ROW_B6_BASE;
		priv->row_shift[7] = ((addrmap[10] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B7_BASE;
		priv->row_shift[8] = ((addrmap[10] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B8_BASE;
		priv->row_shift[9] = ((addrmap[10] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B9_BASE;
		priv->row_shift[10] = (addrmap[11] &
				ROW_MAX_VAL_MASK) + ROW_B10_BASE;
	}

	priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B11_BASE);
	priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] &
				ROW_MAX_VAL_MASK) + ROW_B12_BASE);
	priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B13_BASE);
	priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B14_BASE);
	priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B15_BASE);
	priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] &
				ROW_MAX_VAL_MASK) + ROW_B16_BASE);
	priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[7] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B17_BASE);
}

static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 width, memtype;
	int index;

	memtype = readl(priv->baseaddr + CTRL_OFST);
	width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;

	priv->col_shift[0] = 0;
	priv->col_shift[1] = 1;
	priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE;
	priv->col_shift[3] = ((addrmap[2] >> 8) &
			COL_MAX_VAL_MASK) + COL_B3_BASE;
	priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) &
					COL_MAX_VAL_MASK) + COL_B4_BASE);
	priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) &
					COL_MAX_VAL_MASK) + COL_B5_BASE);
	priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] &
					COL_MAX_VAL_MASK) + COL_B6_BASE);
	priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) &
					COL_MAX_VAL_MASK) + COL_B7_BASE);
	priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) &
					COL_MAX_VAL_MASK) + COL_B8_BASE);
	priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) &
					COL_MAX_VAL_MASK) + COL_B9_BASE);
	if (width == DDRCTL_EWDTH_64) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[11] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		} else {
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[13] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		}
	} else if (width == DDRCTL_EWDTH_32) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		}
	} else {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[13] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
		}
	}

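	/*
	 * For reduced bus widths (width is 1 for 32-bit, 2 for 16-bit) the
	 * decoded column positions move up by 'width' entries and the lowest
	 * entries become unused, so adjust the shift table accordingly.
	 */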
	if (width) {
		for (index = 9; index > width; index--) {
			priv->col_shift[index] = priv->col_shift[index - width];
			priv->col_shift[index - width] = 0;
		}
	}

}

static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	priv->bank_shift[0] = (addrmap[1] & BANK_MAX_VAL_MASK) + BANK_B0_BASE;
	priv->bank_shift[1] = ((addrmap[1] >> 8) &
				BANK_MAX_VAL_MASK) + BANK_B1_BASE;
	priv->bank_shift[2] = (((addrmap[1] >> 16) &
				BANK_MAX_VAL_MASK) == BANK_MAX_VAL_MASK) ? 0 :
				(((addrmap[1] >> 16) & BANK_MAX_VAL_MASK) +
				 BANK_B2_BASE);

}

static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	priv->bankgrp_shift[0] = (addrmap[8] &
				BANKGRP_MAX_VAL_MASK) + BANKGRP_B0_BASE;
	priv->bankgrp_shift[1] = (((addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK) ==
				BANKGRP_MAX_VAL_MASK) ? 0 : (((addrmap[8] >> 8)
				& BANKGRP_MAX_VAL_MASK) + BANKGRP_B1_BASE);

}

static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	priv->rank_shift[0] = ((addrmap[0] & RANK_MAX_VAL_MASK) ==
				RANK_MAX_VAL_MASK) ? 0 : ((addrmap[0] &
				RANK_MAX_VAL_MASK) + RANK_B0_BASE);
}

/**
 * setup_address_map -	Set Address Map by querying ADDRMAP registers.
 * @priv:		DDR memory controller private instance data.
 *
 * Set Address Map by querying ADDRMAP registers.
 *
 * Return: none.
 */
static void setup_address_map(struct synps_edac_priv *priv)
{
	u32 addrmap[12];
	int index;

	for (index = 0; index < 12; index++) {
		u32 addrmap_offset;

		addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4);
		addrmap[index] = readl(priv->baseaddr + addrmap_offset);
	}

	setup_row_address_map(priv, addrmap);

	setup_column_address_map(priv, addrmap);

	setup_bank_address_map(priv, addrmap);

	setup_bg_address_map(priv, addrmap);

	setup_rank_address_map(priv, addrmap);
}
#endif /* CONFIG_EDAC_DEBUG */

/**
 * mc_probe - Check controller and bind driver.
 * @pdev:	platform device.
 *
 * Probe a specific controller instance for binding with the driver.
 *
 * Return: 0 if the controller instance was successfully bound to the
 * driver; otherwise, < 0 on error.
 */
static int mc_probe(struct platform_device *pdev)
{
	const struct synps_platform_data *p_data;
	struct edac_mc_layer layers[2];
	struct synps_edac_priv *priv;
	struct mem_ctl_info *mci;
	void __iomem *baseaddr;
	struct resource *res;
	int rc;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	baseaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(baseaddr))
		return PTR_ERR(baseaddr);

	p_data = of_device_get_match_data(&pdev->dev);
	if (!p_data)
		return -ENODEV;

	if (!p_data->get_ecc_state(baseaddr)) {
		edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
		return -ENXIO;
	}

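	/*
	 * One virtual chip-select row with a single channel: the DDRC is
	 * reported to EDAC as a single logical memory target.
	 */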
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = SYNPS_EDAC_NR_CSROWS;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = SYNPS_EDAC_NR_CHANS;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
			    sizeof(struct synps_edac_priv));
	if (!mci) {
		edac_printk(KERN_ERR, EDAC_MC,
			    "Failed memory allocation for mc instance\n");
		return -ENOMEM;
	}

	priv = mci->pvt_info;
	priv->baseaddr = baseaddr;
	priv->p_data = p_data;

	mc_init(mci, pdev);

	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
		rc = setup_irq(mci, pdev);
		if (rc)
			goto free_edac_mc;
	}

	rc = edac_mc_add_mc(mci);
	if (rc) {
		edac_printk(KERN_ERR, EDAC_MC,
			    "Failed to register with EDAC core\n");
		goto free_edac_mc;
	}

#ifdef CONFIG_EDAC_DEBUG
	if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) {
		rc = edac_create_sysfs_attributes(mci);
		if (rc) {
			edac_printk(KERN_ERR, EDAC_MC,
					"Failed to create sysfs entries\n");
			goto del_mc;
		}
	}

	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
		setup_address_map(priv);
#endif

	/*
	 * Start capturing the correctable and uncorrectable errors. A write of
	 * 0 starts the counters.
	 */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT))
		writel(0x0, baseaddr + ECC_CTRL_OFST);

	return rc;

#ifdef CONFIG_EDAC_DEBUG
del_mc:
	edac_mc_del_mc(&pdev->dev);
#endif
free_edac_mc:
	edac_mc_free(mci);

	return rc;
}

/**
 * mc_remove - Unbind driver from controller.
 * @pdev:	Platform device.
 *
 * Return: Unconditionally 0
 */
static int mc_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);
	struct synps_edac_priv *priv = mci->pvt_info;

	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
		disable_intr(priv);

#ifdef CONFIG_EDAC_DEBUG
	if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT)
		edac_remove_sysfs_attributes(mci);
#endif

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);

	return 0;
}

static struct platform_driver synps_edac_mc_driver = {
	.driver = {
		   .name = "synopsys-edac",
		   .of_match_table = synps_edac_match,
		   },
	.probe = mc_probe,
	.remove = mc_remove,
};

module_platform_driver(synps_edac_mc_driver);

MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Synopsys DDR ECC driver");
MODULE_LICENSE("GPL v2");