cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

synopsys_edac.c (38906B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Synopsys DDR ECC Driver
       3 * This driver is based on the ppc4xx_edac.c driver
      5 *
      6 * Copyright (C) 2012 - 2014 Xilinx, Inc.
      7 */
      8
      9#include <linux/edac.h>
     10#include <linux/module.h>
     11#include <linux/platform_device.h>
     12#include <linux/interrupt.h>
     13#include <linux/of.h>
     14#include <linux/of_device.h>
     15
     16#include "edac_module.h"
     17
     18/* Number of cs_rows needed per memory controller */
     19#define SYNPS_EDAC_NR_CSROWS		1
     20
     21/* Number of channels per memory controller */
     22#define SYNPS_EDAC_NR_CHANS		1
     23
     24/* Granularity of reported error in bytes */
     25#define SYNPS_EDAC_ERR_GRAIN		1
     26
     27#define SYNPS_EDAC_MSG_SIZE		256
     28
     29#define SYNPS_EDAC_MOD_STRING		"synps_edac"
     30#define SYNPS_EDAC_MOD_VER		"1"
     31
     32/* Synopsys DDR memory controller registers that are relevant to ECC */
     33#define CTRL_OFST			0x0
     34#define T_ZQ_OFST			0xA4
     35
     36/* ECC control register */
     37#define ECC_CTRL_OFST			0xC4
     38/* ECC log register */
     39#define CE_LOG_OFST			0xC8
     40/* ECC address register */
     41#define CE_ADDR_OFST			0xCC
     42/* ECC data[31:0] register */
     43#define CE_DATA_31_0_OFST		0xD0
     44
     45/* Uncorrectable error info registers */
     46#define UE_LOG_OFST			0xDC
     47#define UE_ADDR_OFST			0xE0
     48#define UE_DATA_31_0_OFST		0xE4
     49
     50#define STAT_OFST			0xF0
     51#define SCRUB_OFST			0xF4
     52
     53/* Control register bit field definitions */
     54#define CTRL_BW_MASK			0xC
     55#define CTRL_BW_SHIFT			2
     56
     57#define DDRCTL_WDTH_16			1
     58#define DDRCTL_WDTH_32			0
     59
     60/* ZQ register bit field definitions */
     61#define T_ZQ_DDRMODE_MASK		0x2
     62
     63/* ECC control register bit field definitions */
     64#define ECC_CTRL_CLR_CE_ERR		0x2
     65#define ECC_CTRL_CLR_UE_ERR		0x1
     66
     67/* ECC correctable/uncorrectable error log register definitions */
     68#define LOG_VALID			0x1
     69#define CE_LOG_BITPOS_MASK		0xFE
     70#define CE_LOG_BITPOS_SHIFT		1
     71
     72/* ECC correctable/uncorrectable error address register definitions */
     73#define ADDR_COL_MASK			0xFFF
     74#define ADDR_ROW_MASK			0xFFFF000
     75#define ADDR_ROW_SHIFT			12
     76#define ADDR_BANK_MASK			0x70000000
     77#define ADDR_BANK_SHIFT			28
     78
     79/* ECC statistic register definitions */
     80#define STAT_UECNT_MASK			0xFF
     81#define STAT_CECNT_MASK			0xFF00
     82#define STAT_CECNT_SHIFT		8
     83
     84/* ECC scrub register definitions */
     85#define SCRUB_MODE_MASK			0x7
     86#define SCRUB_MODE_SECDED		0x4
     87
     88/* DDR ECC Quirks */
     89#define DDR_ECC_INTR_SUPPORT		BIT(0)
     90#define DDR_ECC_DATA_POISON_SUPPORT	BIT(1)
     91#define DDR_ECC_INTR_SELF_CLEAR		BIT(2)
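
/*
 * Illustration (not part of the original source) of how the quirk flags
 * above are consumed later in this file:
 *
 *   DDR_ECC_INTR_SUPPORT        - errors are reported from intr_handler()
 *                                 instead of the polled edac_check hook
 *                                 set up in mc_init().
 *   DDR_ECC_DATA_POISON_SUPPORT - the CONFIG_EDAC_DEBUG data-poison sysfs
 *                                 attributes are created in mc_probe().
 *   DDR_ECC_INTR_SELF_CLEAR     - the controller clears the CE/UE interrupt
 *                                 bits automatically, and interrupts are
 *                                 enabled through ECC_CLR_OFST rather than
 *                                 the QOS interrupt registers (see
 *                                 enable_intr() and intr_handler()).
 */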
     92
     93/* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
     94/* ECC Configuration Registers */
     95#define ECC_CFG0_OFST			0x70
     96#define ECC_CFG1_OFST			0x74
     97
     98/* ECC Status Register */
     99#define ECC_STAT_OFST			0x78
    100
    101/* ECC Clear Register */
    102#define ECC_CLR_OFST			0x7C
    103
    104/* ECC Error count Register */
    105#define ECC_ERRCNT_OFST			0x80
    106
    107/* ECC Corrected Error Address Register */
    108#define ECC_CEADDR0_OFST		0x84
    109#define ECC_CEADDR1_OFST		0x88
    110
     112/* ECC Corrected Error Syndrome Registers */
    112#define ECC_CSYND0_OFST			0x8C
    113#define ECC_CSYND1_OFST			0x90
    114#define ECC_CSYND2_OFST			0x94
    115
    116/* ECC Bit Mask0 Address Register */
    117#define ECC_BITMASK0_OFST		0x98
    118#define ECC_BITMASK1_OFST		0x9C
    119#define ECC_BITMASK2_OFST		0xA0
    120
    121/* ECC UnCorrected Error Address Register */
    122#define ECC_UEADDR0_OFST		0xA4
    123#define ECC_UEADDR1_OFST		0xA8
    124
     126/* ECC UnCorrected Error Syndrome Registers */
    126#define ECC_UESYND0_OFST		0xAC
    127#define ECC_UESYND1_OFST		0xB0
    128#define ECC_UESYND2_OFST		0xB4
    129
    130/* ECC Poison Address Reg */
    131#define ECC_POISON0_OFST		0xB8
    132#define ECC_POISON1_OFST		0xBC
    133
    134#define ECC_ADDRMAP0_OFFSET		0x200
    135
    136/* Control register bitfield definitions */
    137#define ECC_CTRL_BUSWIDTH_MASK		0x3000
    138#define ECC_CTRL_BUSWIDTH_SHIFT		12
    139#define ECC_CTRL_CLR_CE_ERRCNT		BIT(2)
    140#define ECC_CTRL_CLR_UE_ERRCNT		BIT(3)
    141
     142/* DDR Control Register width definitions */
    143#define DDRCTL_EWDTH_16			2
    144#define DDRCTL_EWDTH_32			1
    145#define DDRCTL_EWDTH_64			0
    146
    147/* ECC status register definitions */
    148#define ECC_STAT_UECNT_MASK		0xF0000
    149#define ECC_STAT_UECNT_SHIFT		16
    150#define ECC_STAT_CECNT_MASK		0xF00
    151#define ECC_STAT_CECNT_SHIFT		8
    152#define ECC_STAT_BITNUM_MASK		0x7F
    153
    154/* ECC error count register definitions */
    155#define ECC_ERRCNT_UECNT_MASK		0xFFFF0000
    156#define ECC_ERRCNT_UECNT_SHIFT		16
    157#define ECC_ERRCNT_CECNT_MASK		0xFFFF
    158
    159/* DDR QOS Interrupt register definitions */
    160#define DDR_QOS_IRQ_STAT_OFST		0x20200
    161#define DDR_QOSUE_MASK			0x4
    162#define	DDR_QOSCE_MASK			0x2
    163#define	ECC_CE_UE_INTR_MASK		0x6
    164#define DDR_QOS_IRQ_EN_OFST		0x20208
    165#define DDR_QOS_IRQ_DB_OFST		0x2020C
    166
     167/* ECC CE/UE interrupt enable bit definitions (controllers with self-clearing interrupts) */
    168#define DDR_UE_MASK			BIT(9)
    169#define DDR_CE_MASK			BIT(8)
    170
     171/* ECC Corrected Error Register Mask and Shifts */
    172#define ECC_CEADDR0_RW_MASK		0x3FFFF
    173#define ECC_CEADDR0_RNK_MASK		BIT(24)
    174#define ECC_CEADDR1_BNKGRP_MASK		0x3000000
    175#define ECC_CEADDR1_BNKNR_MASK		0x70000
    176#define ECC_CEADDR1_BLKNR_MASK		0xFFF
    177#define ECC_CEADDR1_BNKGRP_SHIFT	24
    178#define ECC_CEADDR1_BNKNR_SHIFT		16
    179
    180/* ECC Poison register shifts */
    181#define ECC_POISON0_RANK_SHIFT		24
    182#define ECC_POISON0_RANK_MASK		BIT(24)
    183#define ECC_POISON0_COLUMN_SHIFT	0
    184#define ECC_POISON0_COLUMN_MASK		0xFFF
    185#define ECC_POISON1_BG_SHIFT		28
    186#define ECC_POISON1_BG_MASK		0x30000000
    187#define ECC_POISON1_BANKNR_SHIFT	24
    188#define ECC_POISON1_BANKNR_MASK		0x7000000
    189#define ECC_POISON1_ROW_SHIFT		0
    190#define ECC_POISON1_ROW_MASK		0x3FFFF
    191
    192/* DDR Memory type defines */
    193#define MEM_TYPE_DDR3			0x1
    194#define MEM_TYPE_LPDDR3			0x8
    195#define MEM_TYPE_DDR2			0x4
    196#define MEM_TYPE_DDR4			0x10
    197#define MEM_TYPE_LPDDR4			0x20
    198
    199/* DDRC Software control register */
    200#define DDRC_SWCTL			0x320
    201
    202/* DDRC ECC CE & UE poison mask */
    203#define ECC_CEPOISON_MASK		0x3
    204#define ECC_UEPOISON_MASK		0x1
    205
    206/* DDRC Device config masks */
    207#define DDRC_MSTR_CFG_MASK		0xC0000000
    208#define DDRC_MSTR_CFG_SHIFT		30
    209#define DDRC_MSTR_CFG_X4_MASK		0x0
    210#define DDRC_MSTR_CFG_X8_MASK		0x1
    211#define DDRC_MSTR_CFG_X16_MASK		0x2
    212#define DDRC_MSTR_CFG_X32_MASK		0x3
    213
    214#define DDR_MAX_ROW_SHIFT		18
    215#define DDR_MAX_COL_SHIFT		14
    216#define DDR_MAX_BANK_SHIFT		3
    217#define DDR_MAX_BANKGRP_SHIFT		2
    218
    219#define ROW_MAX_VAL_MASK		0xF
    220#define COL_MAX_VAL_MASK		0xF
    221#define BANK_MAX_VAL_MASK		0x1F
    222#define BANKGRP_MAX_VAL_MASK		0x1F
    223#define RANK_MAX_VAL_MASK		0x1F
    224
    225#define ROW_B0_BASE			6
    226#define ROW_B1_BASE			7
    227#define ROW_B2_BASE			8
    228#define ROW_B3_BASE			9
    229#define ROW_B4_BASE			10
    230#define ROW_B5_BASE			11
    231#define ROW_B6_BASE			12
    232#define ROW_B7_BASE			13
    233#define ROW_B8_BASE			14
    234#define ROW_B9_BASE			15
    235#define ROW_B10_BASE			16
    236#define ROW_B11_BASE			17
    237#define ROW_B12_BASE			18
    238#define ROW_B13_BASE			19
    239#define ROW_B14_BASE			20
    240#define ROW_B15_BASE			21
    241#define ROW_B16_BASE			22
    242#define ROW_B17_BASE			23
    243
    244#define COL_B2_BASE			2
    245#define COL_B3_BASE			3
    246#define COL_B4_BASE			4
    247#define COL_B5_BASE			5
    248#define COL_B6_BASE			6
    249#define COL_B7_BASE			7
    250#define COL_B8_BASE			8
    251#define COL_B9_BASE			9
    252#define COL_B10_BASE			10
    253#define COL_B11_BASE			11
    254#define COL_B12_BASE			12
    255#define COL_B13_BASE			13
    256
    257#define BANK_B0_BASE			2
    258#define BANK_B1_BASE			3
    259#define BANK_B2_BASE			4
    260
    261#define BANKGRP_B0_BASE			2
    262#define BANKGRP_B1_BASE			3
    263
    264#define RANK_B0_BASE			6
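
/*
 * Worked example (not part of the original source): in
 * setup_row_address_map() below, row_shift[0] is computed as
 * (ADDRMAP5[3:0]) + ROW_B0_BASE, so a field value of 3 places row bit 0
 * at HIF address bit 3 + 6 = 9. For the optional high-order bits, a field
 * equal to its *_MAX_VAL_MASK means the bit is unused and the stored
 * shift is 0.
 */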
    265
    266/**
    267 * struct ecc_error_info - ECC error log information.
    268 * @row:	Row number.
    269 * @col:	Column number.
    270 * @bank:	Bank number.
    271 * @bitpos:	Bit position.
    272 * @data:	Data causing the error.
    273 * @bankgrpnr:	Bank group number.
    274 * @blknr:	Block number.
    275 */
    276struct ecc_error_info {
    277	u32 row;
    278	u32 col;
    279	u32 bank;
    280	u32 bitpos;
    281	u32 data;
    282	u32 bankgrpnr;
    283	u32 blknr;
    284};
    285
    286/**
    287 * struct synps_ecc_status - ECC status information to report.
    288 * @ce_cnt:	Correctable error count.
    289 * @ue_cnt:	Uncorrectable error count.
    290 * @ceinfo:	Correctable error log information.
    291 * @ueinfo:	Uncorrectable error log information.
    292 */
    293struct synps_ecc_status {
    294	u32 ce_cnt;
    295	u32 ue_cnt;
    296	struct ecc_error_info ceinfo;
    297	struct ecc_error_info ueinfo;
    298};
    299
    300/**
    301 * struct synps_edac_priv - DDR memory controller private instance data.
    302 * @baseaddr:		Base address of the DDR controller.
    303 * @message:		Buffer for framing the event specific info.
    304 * @stat:		ECC status information.
    305 * @p_data:		Platform data.
    306 * @ce_cnt:		Correctable Error count.
    307 * @ue_cnt:		Uncorrectable Error count.
    308 * @poison_addr:	Data poison address.
    309 * @row_shift:		Bit shifts for row bit.
    310 * @col_shift:		Bit shifts for column bit.
    311 * @bank_shift:		Bit shifts for bank bit.
    312 * @bankgrp_shift:	Bit shifts for bank group bit.
    313 * @rank_shift:		Bit shifts for rank bit.
    314 */
    315struct synps_edac_priv {
    316	void __iomem *baseaddr;
    317	char message[SYNPS_EDAC_MSG_SIZE];
    318	struct synps_ecc_status stat;
    319	const struct synps_platform_data *p_data;
    320	u32 ce_cnt;
    321	u32 ue_cnt;
    322#ifdef CONFIG_EDAC_DEBUG
    323	ulong poison_addr;
    324	u32 row_shift[18];
    325	u32 col_shift[14];
    326	u32 bank_shift[3];
    327	u32 bankgrp_shift[2];
    328	u32 rank_shift[1];
    329#endif
    330};
    331
    332/**
    333 * struct synps_platform_data -  synps platform data structure.
    334 * @get_error_info:	Get EDAC error info.
    335 * @get_mtype:		Get mtype.
    336 * @get_dtype:		Get dtype.
    337 * @get_ecc_state:	Get ECC state.
     338 * @quirks:		Quirk flags used to differentiate controller IP versions.
    339 */
    340struct synps_platform_data {
    341	int (*get_error_info)(struct synps_edac_priv *priv);
    342	enum mem_type (*get_mtype)(const void __iomem *base);
    343	enum dev_type (*get_dtype)(const void __iomem *base);
    344	bool (*get_ecc_state)(void __iomem *base);
    345	int quirks;
    346};
    347
    348/**
    349 * zynq_get_error_info - Get the current ECC error info.
    350 * @priv:	DDR memory controller private instance data.
    351 *
    352 * Return: one if there is no error, otherwise zero.
    353 */
    354static int zynq_get_error_info(struct synps_edac_priv *priv)
    355{
    356	struct synps_ecc_status *p;
    357	u32 regval, clearval = 0;
    358	void __iomem *base;
    359
    360	base = priv->baseaddr;
    361	p = &priv->stat;
    362
    363	regval = readl(base + STAT_OFST);
    364	if (!regval)
    365		return 1;
    366
    367	p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
    368	p->ue_cnt = regval & STAT_UECNT_MASK;
    369
    370	regval = readl(base + CE_LOG_OFST);
    371	if (!(p->ce_cnt && (regval & LOG_VALID)))
    372		goto ue_err;
    373
    374	p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
    375	regval = readl(base + CE_ADDR_OFST);
    376	p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
    377	p->ceinfo.col = regval & ADDR_COL_MASK;
    378	p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
    379	p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
    380	edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
    381		 p->ceinfo.data);
    382	clearval = ECC_CTRL_CLR_CE_ERR;
    383
    384ue_err:
    385	regval = readl(base + UE_LOG_OFST);
    386	if (!(p->ue_cnt && (regval & LOG_VALID)))
    387		goto out;
    388
    389	regval = readl(base + UE_ADDR_OFST);
    390	p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
    391	p->ueinfo.col = regval & ADDR_COL_MASK;
    392	p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
    393	p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
    394	clearval |= ECC_CTRL_CLR_UE_ERR;
    395
    396out:
    397	writel(clearval, base + ECC_CTRL_OFST);
    398	writel(0x0, base + ECC_CTRL_OFST);
    399
    400	return 0;
    401}
    402
    403/**
    404 * zynqmp_get_error_info - Get the current ECC error info.
    405 * @priv:	DDR memory controller private instance data.
    406 *
     407 * Return: one if there is no error, otherwise zero.
    408 */
    409static int zynqmp_get_error_info(struct synps_edac_priv *priv)
    410{
    411	struct synps_ecc_status *p;
    412	u32 regval, clearval = 0;
    413	void __iomem *base;
    414
    415	base = priv->baseaddr;
    416	p = &priv->stat;
    417
    418	regval = readl(base + ECC_ERRCNT_OFST);
    419	p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
    420	p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
    421	if (!p->ce_cnt)
    422		goto ue_err;
    423
    424	regval = readl(base + ECC_STAT_OFST);
    425	if (!regval)
    426		return 1;
    427
    428	p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
    429
    430	regval = readl(base + ECC_CEADDR0_OFST);
    431	p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
    432	regval = readl(base + ECC_CEADDR1_OFST);
    433	p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
    434					ECC_CEADDR1_BNKNR_SHIFT;
    435	p->ceinfo.bankgrpnr = (regval &	ECC_CEADDR1_BNKGRP_MASK) >>
    436					ECC_CEADDR1_BNKGRP_SHIFT;
    437	p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
    438	p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
    439	edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n",
    440		 readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST),
    441		 readl(base + ECC_CSYND2_OFST));
    442ue_err:
    443	if (!p->ue_cnt)
    444		goto out;
    445
    446	regval = readl(base + ECC_UEADDR0_OFST);
    447	p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
    448	regval = readl(base + ECC_UEADDR1_OFST);
    449	p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
    450					ECC_CEADDR1_BNKGRP_SHIFT;
    451	p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
    452					ECC_CEADDR1_BNKNR_SHIFT;
    453	p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
    454	p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
    455out:
    456	clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT;
    457	clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
    458	writel(clearval, base + ECC_CLR_OFST);
    459	writel(0x0, base + ECC_CLR_OFST);
    460
    461	return 0;
    462}
    463
    464/**
    465 * handle_error - Handle Correctable and Uncorrectable errors.
    466 * @mci:	EDAC memory controller instance.
    467 * @p:		Synopsys ECC status structure.
    468 *
    469 * Handles ECC correctable and uncorrectable errors.
    470 */
    471static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
    472{
    473	struct synps_edac_priv *priv = mci->pvt_info;
    474	struct ecc_error_info *pinf;
    475
    476	if (p->ce_cnt) {
    477		pinf = &p->ceinfo;
    478		if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
    479			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
    480				 "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
    481				 "CE", pinf->row, pinf->bank,
    482				 pinf->bankgrpnr, pinf->blknr,
    483				 pinf->bitpos, pinf->data);
    484		} else {
    485			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
    486				 "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
    487				 "CE", pinf->row, pinf->bank, pinf->col,
    488				 pinf->bitpos, pinf->data);
    489		}
    490
    491		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
    492				     p->ce_cnt, 0, 0, 0, 0, 0, -1,
    493				     priv->message, "");
    494	}
    495
    496	if (p->ue_cnt) {
    497		pinf = &p->ueinfo;
    498		if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
    499			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
     500				 "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d",
    501				 "UE", pinf->row, pinf->bank,
    502				 pinf->bankgrpnr, pinf->blknr);
    503		} else {
    504			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
     505				 "DDR ECC error type:%s Row %d Bank %d Col %d ",
    506				 "UE", pinf->row, pinf->bank, pinf->col);
    507		}
    508
    509		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
    510				     p->ue_cnt, 0, 0, 0, 0, 0, -1,
    511				     priv->message, "");
    512	}
    513
    514	memset(p, 0, sizeof(*p));
    515}
    516
    517/**
    518 * intr_handler - Interrupt Handler for ECC interrupts.
    519 * @irq:        IRQ number.
    520 * @dev_id:     Device ID.
    521 *
     522 * Return: IRQ_NONE if the interrupt is not set, otherwise IRQ_HANDLED.
    523 */
    524static irqreturn_t intr_handler(int irq, void *dev_id)
    525{
    526	const struct synps_platform_data *p_data;
    527	struct mem_ctl_info *mci = dev_id;
    528	struct synps_edac_priv *priv;
    529	int status, regval;
    530
    531	priv = mci->pvt_info;
    532	p_data = priv->p_data;
    533
    534	/*
    535	 * v3.0 of the controller has the ce/ue bits cleared automatically,
    536	 * so this condition does not apply.
    537	 */
    538	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
    539		regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
    540		regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
    541		if (!(regval & ECC_CE_UE_INTR_MASK))
    542			return IRQ_NONE;
    543	}
    544
    545	status = p_data->get_error_info(priv);
    546	if (status)
    547		return IRQ_NONE;
    548
    549	priv->ce_cnt += priv->stat.ce_cnt;
    550	priv->ue_cnt += priv->stat.ue_cnt;
    551	handle_error(mci, &priv->stat);
    552
    553	edac_dbg(3, "Total error count CE %d UE %d\n",
    554		 priv->ce_cnt, priv->ue_cnt);
    555	/* v3.0 of the controller does not have this register */
    556	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
    557		writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
    558	return IRQ_HANDLED;
    559}
    560
    561/**
    562 * check_errors - Check controller for ECC errors.
    563 * @mci:	EDAC memory controller instance.
    564 *
    565 * Check and post ECC errors. Called by the polling thread.
    566 */
    567static void check_errors(struct mem_ctl_info *mci)
    568{
    569	const struct synps_platform_data *p_data;
    570	struct synps_edac_priv *priv;
    571	int status;
    572
    573	priv = mci->pvt_info;
    574	p_data = priv->p_data;
    575
    576	status = p_data->get_error_info(priv);
    577	if (status)
    578		return;
    579
    580	priv->ce_cnt += priv->stat.ce_cnt;
    581	priv->ue_cnt += priv->stat.ue_cnt;
    582	handle_error(mci, &priv->stat);
    583
    584	edac_dbg(3, "Total error count CE %d UE %d\n",
    585		 priv->ce_cnt, priv->ue_cnt);
    586}
    587
    588/**
    589 * zynq_get_dtype - Return the controller memory width.
    590 * @base:	DDR memory controller base address.
    591 *
    592 * Get the EDAC device type width appropriate for the current controller
    593 * configuration.
    594 *
    595 * Return: a device type width enumeration.
    596 */
    597static enum dev_type zynq_get_dtype(const void __iomem *base)
    598{
    599	enum dev_type dt;
    600	u32 width;
    601
    602	width = readl(base + CTRL_OFST);
    603	width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT;
    604
    605	switch (width) {
    606	case DDRCTL_WDTH_16:
    607		dt = DEV_X2;
    608		break;
    609	case DDRCTL_WDTH_32:
    610		dt = DEV_X4;
    611		break;
    612	default:
    613		dt = DEV_UNKNOWN;
    614	}
    615
    616	return dt;
    617}
    618
    619/**
    620 * zynqmp_get_dtype - Return the controller memory width.
    621 * @base:	DDR memory controller base address.
    622 *
    623 * Get the EDAC device type width appropriate for the current controller
    624 * configuration.
    625 *
    626 * Return: a device type width enumeration.
    627 */
    628static enum dev_type zynqmp_get_dtype(const void __iomem *base)
    629{
    630	enum dev_type dt;
    631	u32 width;
    632
    633	width = readl(base + CTRL_OFST);
    634	width = (width & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
    635	switch (width) {
    636	case DDRCTL_EWDTH_16:
    637		dt = DEV_X2;
    638		break;
    639	case DDRCTL_EWDTH_32:
    640		dt = DEV_X4;
    641		break;
    642	case DDRCTL_EWDTH_64:
    643		dt = DEV_X8;
    644		break;
    645	default:
    646		dt = DEV_UNKNOWN;
    647	}
    648
    649	return dt;
    650}
    651
    652/**
    653 * zynq_get_ecc_state - Return the controller ECC enable/disable status.
    654 * @base:	DDR memory controller base address.
    655 *
    656 * Get the ECC enable/disable status of the controller.
    657 *
    658 * Return: true if enabled, otherwise false.
    659 */
    660static bool zynq_get_ecc_state(void __iomem *base)
    661{
    662	enum dev_type dt;
    663	u32 ecctype;
    664
    665	dt = zynq_get_dtype(base);
    666	if (dt == DEV_UNKNOWN)
    667		return false;
    668
    669	ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
    670	if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
    671		return true;
    672
    673	return false;
    674}
    675
    676/**
    677 * zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
    678 * @base:	DDR memory controller base address.
    679 *
    680 * Get the ECC enable/disable status for the controller.
    681 *
     682 * Return: true if enabled, otherwise false.
    683 */
    684static bool zynqmp_get_ecc_state(void __iomem *base)
    685{
    686	enum dev_type dt;
    687	u32 ecctype;
    688
    689	dt = zynqmp_get_dtype(base);
    690	if (dt == DEV_UNKNOWN)
    691		return false;
    692
    693	ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
    694	if ((ecctype == SCRUB_MODE_SECDED) &&
    695	    ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
    696		return true;
    697
    698	return false;
    699}
    700
    701/**
    702 * get_memsize - Read the size of the attached memory device.
    703 *
    704 * Return: the memory size in bytes.
    705 */
    706static u32 get_memsize(void)
    707{
    708	struct sysinfo inf;
    709
    710	si_meminfo(&inf);
    711
    712	return inf.totalram * inf.mem_unit;
    713}
    714
    715/**
    716 * zynq_get_mtype - Return the controller memory type.
     717 * @base:	DDR memory controller base address.
    718 *
    719 * Get the EDAC memory type appropriate for the current controller
    720 * configuration.
    721 *
    722 * Return: a memory type enumeration.
    723 */
    724static enum mem_type zynq_get_mtype(const void __iomem *base)
    725{
    726	enum mem_type mt;
    727	u32 memtype;
    728
    729	memtype = readl(base + T_ZQ_OFST);
    730
    731	if (memtype & T_ZQ_DDRMODE_MASK)
    732		mt = MEM_DDR3;
    733	else
    734		mt = MEM_DDR2;
    735
    736	return mt;
    737}
    738
    739/**
     740 * zynqmp_get_mtype - Return the controller memory type.
     741 * @base:	DDR memory controller base address.
    742 *
    743 * Get the EDAC memory type appropriate for the current controller
    744 * configuration.
    745 *
    746 * Return: a memory type enumeration.
    747 */
    748static enum mem_type zynqmp_get_mtype(const void __iomem *base)
    749{
    750	enum mem_type mt;
    751	u32 memtype;
    752
    753	memtype = readl(base + CTRL_OFST);
    754
    755	if ((memtype & MEM_TYPE_DDR3) || (memtype & MEM_TYPE_LPDDR3))
    756		mt = MEM_DDR3;
    757	else if (memtype & MEM_TYPE_DDR2)
    758		mt = MEM_RDDR2;
    759	else if ((memtype & MEM_TYPE_LPDDR4) || (memtype & MEM_TYPE_DDR4))
    760		mt = MEM_DDR4;
    761	else
    762		mt = MEM_EMPTY;
    763
    764	return mt;
    765}
    766
    767/**
    768 * init_csrows - Initialize the csrow data.
    769 * @mci:	EDAC memory controller instance.
    770 *
    771 * Initialize the chip select rows associated with the EDAC memory
    772 * controller instance.
    773 */
    774static void init_csrows(struct mem_ctl_info *mci)
    775{
    776	struct synps_edac_priv *priv = mci->pvt_info;
    777	const struct synps_platform_data *p_data;
    778	struct csrow_info *csi;
    779	struct dimm_info *dimm;
    780	u32 size, row;
    781	int j;
    782
    783	p_data = priv->p_data;
    784
    785	for (row = 0; row < mci->nr_csrows; row++) {
    786		csi = mci->csrows[row];
    787		size = get_memsize();
    788
    789		for (j = 0; j < csi->nr_channels; j++) {
    790			dimm		= csi->channels[j]->dimm;
    791			dimm->edac_mode	= EDAC_SECDED;
    792			dimm->mtype	= p_data->get_mtype(priv->baseaddr);
    793			dimm->nr_pages	= (size >> PAGE_SHIFT) / csi->nr_channels;
    794			dimm->grain	= SYNPS_EDAC_ERR_GRAIN;
    795			dimm->dtype	= p_data->get_dtype(priv->baseaddr);
    796		}
    797	}
    798}
    799
    800/**
    801 * mc_init - Initialize one driver instance.
    802 * @mci:	EDAC memory controller instance.
    803 * @pdev:	platform device.
    804 *
    805 * Perform initialization of the EDAC memory controller instance and
    806 * related driver-private data associated with the memory controller the
    807 * instance is bound to.
    808 */
    809static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
    810{
    811	struct synps_edac_priv *priv;
    812
    813	mci->pdev = &pdev->dev;
    814	priv = mci->pvt_info;
    815	platform_set_drvdata(pdev, mci);
    816
    817	/* Initialize controller capabilities and configuration */
    818	mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
    819	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
    820	mci->scrub_cap = SCRUB_HW_SRC;
    821	mci->scrub_mode = SCRUB_NONE;
    822
    823	mci->edac_cap = EDAC_FLAG_SECDED;
    824	mci->ctl_name = "synps_ddr_controller";
    825	mci->dev_name = SYNPS_EDAC_MOD_STRING;
    826	mci->mod_name = SYNPS_EDAC_MOD_VER;
    827
    828	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
    829		edac_op_state = EDAC_OPSTATE_INT;
    830	} else {
    831		edac_op_state = EDAC_OPSTATE_POLL;
    832		mci->edac_check = check_errors;
    833	}
    834
    835	mci->ctl_page_to_phys = NULL;
    836
    837	init_csrows(mci);
    838}
    839
    840static void enable_intr(struct synps_edac_priv *priv)
    841{
    842	/* Enable UE/CE Interrupts */
    843	if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
    844		writel(DDR_UE_MASK | DDR_CE_MASK,
    845		       priv->baseaddr + ECC_CLR_OFST);
    846	else
    847		writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
    848		       priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
    849
    850}
    851
    852static void disable_intr(struct synps_edac_priv *priv)
    853{
    854	/* Disable UE/CE Interrupts */
    855	writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
    856			priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
    857}
    858
    859static int setup_irq(struct mem_ctl_info *mci,
    860		     struct platform_device *pdev)
    861{
    862	struct synps_edac_priv *priv = mci->pvt_info;
    863	int ret, irq;
    864
    865	irq = platform_get_irq(pdev, 0);
    866	if (irq < 0) {
    867		edac_printk(KERN_ERR, EDAC_MC,
    868			    "No IRQ %d in DT\n", irq);
    869		return irq;
    870	}
    871
    872	ret = devm_request_irq(&pdev->dev, irq, intr_handler,
    873			       0, dev_name(&pdev->dev), mci);
    874	if (ret < 0) {
    875		edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
    876		return ret;
    877	}
    878
    879	enable_intr(priv);
    880
    881	return 0;
    882}
    883
    884static const struct synps_platform_data zynq_edac_def = {
    885	.get_error_info	= zynq_get_error_info,
    886	.get_mtype	= zynq_get_mtype,
    887	.get_dtype	= zynq_get_dtype,
    888	.get_ecc_state	= zynq_get_ecc_state,
    889	.quirks		= 0,
    890};
    891
    892static const struct synps_platform_data zynqmp_edac_def = {
    893	.get_error_info	= zynqmp_get_error_info,
    894	.get_mtype	= zynqmp_get_mtype,
    895	.get_dtype	= zynqmp_get_dtype,
    896	.get_ecc_state	= zynqmp_get_ecc_state,
    897	.quirks         = (DDR_ECC_INTR_SUPPORT
    898#ifdef CONFIG_EDAC_DEBUG
    899			  | DDR_ECC_DATA_POISON_SUPPORT
    900#endif
    901			  ),
    902};
    903
    904static const struct synps_platform_data synopsys_edac_def = {
    905	.get_error_info	= zynqmp_get_error_info,
    906	.get_mtype	= zynqmp_get_mtype,
    907	.get_dtype	= zynqmp_get_dtype,
    908	.get_ecc_state	= zynqmp_get_ecc_state,
    909	.quirks         = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
    910#ifdef CONFIG_EDAC_DEBUG
    911			  | DDR_ECC_DATA_POISON_SUPPORT
    912#endif
    913			  ),
    914};
    915
    916
    917static const struct of_device_id synps_edac_match[] = {
    918	{
    919		.compatible = "xlnx,zynq-ddrc-a05",
    920		.data = (void *)&zynq_edac_def
    921	},
    922	{
    923		.compatible = "xlnx,zynqmp-ddrc-2.40a",
    924		.data = (void *)&zynqmp_edac_def
    925	},
    926	{
    927		.compatible = "snps,ddrc-3.80a",
    928		.data = (void *)&synopsys_edac_def
    929	},
    930	{
    931		/* end of table */
    932	}
    933};
    934
    935MODULE_DEVICE_TABLE(of, synps_edac_match);
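
/*
 * Binding illustration (not part of the original source): a minimal sketch
 * of a device tree node that would match this driver through the
 * "xlnx,zynqmp-ddrc-2.40a" compatible above. The unit address, reg window
 * and interrupt specifier are assumptions for illustration only.
 *
 *	memory-controller@fd070000 {
 *		compatible = "xlnx,zynqmp-ddrc-2.40a";
 *		reg = <0x0 0xfd070000 0x0 0x30000>;
 *		interrupt-parent = <&gic>;
 *		interrupts = <0 112 4>;
 *	};
 */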
    936
    937#ifdef CONFIG_EDAC_DEBUG
    938#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
    939
    940/**
    941 * ddr_poison_setup -	Update poison registers.
    942 * @priv:		DDR memory controller private instance data.
    943 *
    944 * Update poison registers as per DDR mapping.
    945 * Return: none.
    946 */
    947static void ddr_poison_setup(struct synps_edac_priv *priv)
    948{
    949	int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
    950	int index;
    951	ulong hif_addr = 0;
    952
    953	hif_addr = priv->poison_addr >> 3;
    954
    955	for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
    956		if (priv->row_shift[index])
    957			row |= (((hif_addr >> priv->row_shift[index]) &
    958						BIT(0)) << index);
    959		else
    960			break;
    961	}
    962
    963	for (index = 0; index < DDR_MAX_COL_SHIFT; index++) {
    964		if (priv->col_shift[index] || index < 3)
    965			col |= (((hif_addr >> priv->col_shift[index]) &
    966						BIT(0)) << index);
    967		else
    968			break;
    969	}
    970
    971	for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) {
    972		if (priv->bank_shift[index])
    973			bank |= (((hif_addr >> priv->bank_shift[index]) &
    974						BIT(0)) << index);
    975		else
    976			break;
    977	}
    978
    979	for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) {
    980		if (priv->bankgrp_shift[index])
    981			bankgrp |= (((hif_addr >> priv->bankgrp_shift[index])
    982						& BIT(0)) << index);
    983		else
    984			break;
    985	}
    986
    987	if (priv->rank_shift[0])
    988		rank = (hif_addr >> priv->rank_shift[0]) & BIT(0);
    989
    990	regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK;
    991	regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK;
    992	writel(regval, priv->baseaddr + ECC_POISON0_OFST);
    993
    994	regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK;
    995	regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK;
    996	regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK;
    997	writel(regval, priv->baseaddr + ECC_POISON1_OFST);
    998}
    999
   1000static ssize_t inject_data_error_show(struct device *dev,
   1001				      struct device_attribute *mattr,
   1002				      char *data)
   1003{
   1004	struct mem_ctl_info *mci = to_mci(dev);
   1005	struct synps_edac_priv *priv = mci->pvt_info;
   1006
   1007	return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r"
   1008			"Error injection Address: 0x%lx\n\r",
   1009			readl(priv->baseaddr + ECC_POISON0_OFST),
   1010			readl(priv->baseaddr + ECC_POISON1_OFST),
   1011			priv->poison_addr);
   1012}
   1013
   1014static ssize_t inject_data_error_store(struct device *dev,
   1015				       struct device_attribute *mattr,
   1016				       const char *data, size_t count)
   1017{
   1018	struct mem_ctl_info *mci = to_mci(dev);
   1019	struct synps_edac_priv *priv = mci->pvt_info;
   1020
   1021	if (kstrtoul(data, 0, &priv->poison_addr))
   1022		return -EINVAL;
   1023
   1024	ddr_poison_setup(priv);
   1025
   1026	return count;
   1027}
   1028
   1029static ssize_t inject_data_poison_show(struct device *dev,
   1030				       struct device_attribute *mattr,
   1031				       char *data)
   1032{
   1033	struct mem_ctl_info *mci = to_mci(dev);
   1034	struct synps_edac_priv *priv = mci->pvt_info;
   1035
   1036	return sprintf(data, "Data Poisoning: %s\n\r",
   1037			(((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3)
   1038			? ("Correctable Error") : ("UnCorrectable Error"));
   1039}
   1040
   1041static ssize_t inject_data_poison_store(struct device *dev,
   1042					struct device_attribute *mattr,
   1043					const char *data, size_t count)
   1044{
   1045	struct mem_ctl_info *mci = to_mci(dev);
   1046	struct synps_edac_priv *priv = mci->pvt_info;
   1047
   1048	writel(0, priv->baseaddr + DDRC_SWCTL);
   1049	if (strncmp(data, "CE", 2) == 0)
   1050		writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
   1051	else
   1052		writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
   1053	writel(1, priv->baseaddr + DDRC_SWCTL);
   1054
   1055	return count;
   1056}
   1057
   1058static DEVICE_ATTR_RW(inject_data_error);
   1059static DEVICE_ATTR_RW(inject_data_poison);
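
/*
 * Usage illustration (not part of the original source): with
 * CONFIG_EDAC_DEBUG enabled and a controller that advertises
 * DDR_ECC_DATA_POISON_SUPPORT, the attributes above appear under the EDAC
 * memory controller device. The sysfs path and address are assumptions
 * for illustration only:
 *
 *   # select correctable-error poisoning, then arm a poison address
 *   echo CE > /sys/devices/system/edac/mc/mc0/inject_data_poison
 *   echo 0x1000 > /sys/devices/system/edac/mc/mc0/inject_data_error
 */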
   1060
   1061static int edac_create_sysfs_attributes(struct mem_ctl_info *mci)
   1062{
   1063	int rc;
   1064
   1065	rc = device_create_file(&mci->dev, &dev_attr_inject_data_error);
   1066	if (rc < 0)
   1067		return rc;
   1068	rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison);
   1069	if (rc < 0)
   1070		return rc;
   1071	return 0;
   1072}
   1073
   1074static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci)
   1075{
   1076	device_remove_file(&mci->dev, &dev_attr_inject_data_error);
   1077	device_remove_file(&mci->dev, &dev_attr_inject_data_poison);
   1078}
   1079
   1080static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap)
   1081{
   1082	u32 addrmap_row_b2_10;
   1083	int index;
   1084
   1085	priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE;
   1086	priv->row_shift[1] = ((addrmap[5] >> 8) &
   1087			ROW_MAX_VAL_MASK) + ROW_B1_BASE;
   1088
   1089	addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK;
   1090	if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) {
   1091		for (index = 2; index < 11; index++)
   1092			priv->row_shift[index] = addrmap_row_b2_10 +
   1093				index + ROW_B0_BASE;
   1094
   1095	} else {
   1096		priv->row_shift[2] = (addrmap[9] &
   1097				ROW_MAX_VAL_MASK) + ROW_B2_BASE;
   1098		priv->row_shift[3] = ((addrmap[9] >> 8) &
   1099				ROW_MAX_VAL_MASK) + ROW_B3_BASE;
   1100		priv->row_shift[4] = ((addrmap[9] >> 16) &
   1101				ROW_MAX_VAL_MASK) + ROW_B4_BASE;
   1102		priv->row_shift[5] = ((addrmap[9] >> 24) &
   1103				ROW_MAX_VAL_MASK) + ROW_B5_BASE;
   1104		priv->row_shift[6] = (addrmap[10] &
   1105				ROW_MAX_VAL_MASK) + ROW_B6_BASE;
   1106		priv->row_shift[7] = ((addrmap[10] >> 8) &
   1107				ROW_MAX_VAL_MASK) + ROW_B7_BASE;
   1108		priv->row_shift[8] = ((addrmap[10] >> 16) &
   1109				ROW_MAX_VAL_MASK) + ROW_B8_BASE;
   1110		priv->row_shift[9] = ((addrmap[10] >> 24) &
   1111				ROW_MAX_VAL_MASK) + ROW_B9_BASE;
   1112		priv->row_shift[10] = (addrmap[11] &
   1113				ROW_MAX_VAL_MASK) + ROW_B10_BASE;
   1114	}
   1115
   1116	priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) ==
   1117				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) &
   1118				ROW_MAX_VAL_MASK) + ROW_B11_BASE);
   1119	priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) ==
   1120				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] &
   1121				ROW_MAX_VAL_MASK) + ROW_B12_BASE);
   1122	priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) ==
   1123				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) &
   1124				ROW_MAX_VAL_MASK) + ROW_B13_BASE);
   1125	priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) ==
   1126				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) &
   1127				ROW_MAX_VAL_MASK) + ROW_B14_BASE);
   1128	priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) ==
   1129				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) &
   1130				ROW_MAX_VAL_MASK) + ROW_B15_BASE);
   1131	priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) ==
   1132				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] &
   1133				ROW_MAX_VAL_MASK) + ROW_B16_BASE);
   1134	priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) ==
   1135				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[7] >> 8) &
   1136				ROW_MAX_VAL_MASK) + ROW_B17_BASE);
   1137}
   1138
   1139static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap)
   1140{
   1141	u32 width, memtype;
   1142	int index;
   1143
   1144	memtype = readl(priv->baseaddr + CTRL_OFST);
   1145	width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
   1146
   1147	priv->col_shift[0] = 0;
   1148	priv->col_shift[1] = 1;
   1149	priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE;
   1150	priv->col_shift[3] = ((addrmap[2] >> 8) &
   1151			COL_MAX_VAL_MASK) + COL_B3_BASE;
   1152	priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) ==
   1153			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) &
   1154					COL_MAX_VAL_MASK) + COL_B4_BASE);
   1155	priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) ==
   1156			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) &
   1157					COL_MAX_VAL_MASK) + COL_B5_BASE);
   1158	priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) ==
   1159			COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] &
   1160					COL_MAX_VAL_MASK) + COL_B6_BASE);
   1161	priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) ==
   1162			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) &
   1163					COL_MAX_VAL_MASK) + COL_B7_BASE);
   1164	priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) ==
   1165			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) &
   1166					COL_MAX_VAL_MASK) + COL_B8_BASE);
   1167	priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) ==
   1168			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) &
   1169					COL_MAX_VAL_MASK) + COL_B9_BASE);
   1170	if (width == DDRCTL_EWDTH_64) {
   1171		if (memtype & MEM_TYPE_LPDDR3) {
   1172			priv->col_shift[10] = ((addrmap[4] &
   1173				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
   1174				((addrmap[4] & COL_MAX_VAL_MASK) +
   1175				 COL_B10_BASE);
   1176			priv->col_shift[11] = (((addrmap[4] >> 8) &
   1177				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
   1178				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
   1179				 COL_B11_BASE);
   1180		} else {
   1181			priv->col_shift[11] = ((addrmap[4] &
   1182				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
   1183				((addrmap[4] & COL_MAX_VAL_MASK) +
   1184				 COL_B10_BASE);
   1185			priv->col_shift[13] = (((addrmap[4] >> 8) &
   1186				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
   1187				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
   1188				 COL_B11_BASE);
   1189		}
   1190	} else if (width == DDRCTL_EWDTH_32) {
   1191		if (memtype & MEM_TYPE_LPDDR3) {
   1192			priv->col_shift[10] = (((addrmap[3] >> 24) &
   1193				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
   1194				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
   1195				 COL_B9_BASE);
   1196			priv->col_shift[11] = ((addrmap[4] &
   1197				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
   1198				((addrmap[4] & COL_MAX_VAL_MASK) +
   1199				 COL_B10_BASE);
   1200		} else {
   1201			priv->col_shift[11] = (((addrmap[3] >> 24) &
   1202				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
   1203				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
   1204				 COL_B9_BASE);
   1205			priv->col_shift[13] = ((addrmap[4] &
   1206				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
   1207				((addrmap[4] & COL_MAX_VAL_MASK) +
   1208				 COL_B10_BASE);
   1209		}
   1210	} else {
   1211		if (memtype & MEM_TYPE_LPDDR3) {
   1212			priv->col_shift[10] = (((addrmap[3] >> 16) &
   1213				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
   1214				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
   1215				 COL_B8_BASE);
   1216			priv->col_shift[11] = (((addrmap[3] >> 24) &
   1217				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
   1218				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
   1219				 COL_B9_BASE);
   1220			priv->col_shift[13] = ((addrmap[4] &
   1221				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
   1222				((addrmap[4] & COL_MAX_VAL_MASK) +
   1223				 COL_B10_BASE);
   1224		} else {
   1225			priv->col_shift[11] = (((addrmap[3] >> 16) &
   1226				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
   1227				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
   1228				 COL_B8_BASE);
   1229			priv->col_shift[13] = (((addrmap[3] >> 24) &
   1230				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
   1231				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
   1232				 COL_B9_BASE);
   1233		}
   1234	}
   1235
   1236	if (width) {
   1237		for (index = 9; index > width; index--) {
   1238			priv->col_shift[index] = priv->col_shift[index - width];
   1239			priv->col_shift[index - width] = 0;
   1240		}
   1241	}
   1242
   1243}
   1244
   1245static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
   1246{
   1247	priv->bank_shift[0] = (addrmap[1] & BANK_MAX_VAL_MASK) + BANK_B0_BASE;
   1248	priv->bank_shift[1] = ((addrmap[1] >> 8) &
   1249				BANK_MAX_VAL_MASK) + BANK_B1_BASE;
   1250	priv->bank_shift[2] = (((addrmap[1] >> 16) &
   1251				BANK_MAX_VAL_MASK) == BANK_MAX_VAL_MASK) ? 0 :
   1252				(((addrmap[1] >> 16) & BANK_MAX_VAL_MASK) +
   1253				 BANK_B2_BASE);
   1254
   1255}
   1256
   1257static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap)
   1258{
   1259	priv->bankgrp_shift[0] = (addrmap[8] &
   1260				BANKGRP_MAX_VAL_MASK) + BANKGRP_B0_BASE;
   1261	priv->bankgrp_shift[1] = (((addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK) ==
   1262				BANKGRP_MAX_VAL_MASK) ? 0 : (((addrmap[8] >> 8)
   1263				& BANKGRP_MAX_VAL_MASK) + BANKGRP_B1_BASE);
   1264
   1265}
   1266
   1267static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
   1268{
   1269	priv->rank_shift[0] = ((addrmap[0] & RANK_MAX_VAL_MASK) ==
   1270				RANK_MAX_VAL_MASK) ? 0 : ((addrmap[0] &
   1271				RANK_MAX_VAL_MASK) + RANK_B0_BASE);
   1272}
   1273
   1274/**
   1275 * setup_address_map -	Set Address Map by querying ADDRMAP registers.
   1276 * @priv:		DDR memory controller private instance data.
   1277 *
   1278 * Set Address Map by querying ADDRMAP registers.
   1279 *
   1280 * Return: none.
   1281 */
   1282static void setup_address_map(struct synps_edac_priv *priv)
   1283{
   1284	u32 addrmap[12];
   1285	int index;
   1286
   1287	for (index = 0; index < 12; index++) {
   1288		u32 addrmap_offset;
   1289
   1290		addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4);
   1291		addrmap[index] = readl(priv->baseaddr + addrmap_offset);
   1292	}
   1293
   1294	setup_row_address_map(priv, addrmap);
   1295
   1296	setup_column_address_map(priv, addrmap);
   1297
   1298	setup_bank_address_map(priv, addrmap);
   1299
   1300	setup_bg_address_map(priv, addrmap);
   1301
   1302	setup_rank_address_map(priv, addrmap);
   1303}
   1304#endif /* CONFIG_EDAC_DEBUG */
   1305
   1306/**
   1307 * mc_probe - Check controller and bind driver.
   1308 * @pdev:	platform device.
   1309 *
   1310 * Probe a specific controller instance for binding with the driver.
   1311 *
   1312 * Return: 0 if the controller instance was successfully bound to the
   1313 * driver; otherwise, < 0 on error.
   1314 */
   1315static int mc_probe(struct platform_device *pdev)
   1316{
   1317	const struct synps_platform_data *p_data;
   1318	struct edac_mc_layer layers[2];
   1319	struct synps_edac_priv *priv;
   1320	struct mem_ctl_info *mci;
   1321	void __iomem *baseaddr;
   1322	struct resource *res;
   1323	int rc;
   1324
   1325	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   1326	baseaddr = devm_ioremap_resource(&pdev->dev, res);
   1327	if (IS_ERR(baseaddr))
   1328		return PTR_ERR(baseaddr);
   1329
   1330	p_data = of_device_get_match_data(&pdev->dev);
   1331	if (!p_data)
   1332		return -ENODEV;
   1333
   1334	if (!p_data->get_ecc_state(baseaddr)) {
   1335		edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
   1336		return -ENXIO;
   1337	}
   1338
   1339	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
   1340	layers[0].size = SYNPS_EDAC_NR_CSROWS;
   1341	layers[0].is_virt_csrow = true;
   1342	layers[1].type = EDAC_MC_LAYER_CHANNEL;
   1343	layers[1].size = SYNPS_EDAC_NR_CHANS;
   1344	layers[1].is_virt_csrow = false;
   1345
   1346	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
   1347			    sizeof(struct synps_edac_priv));
   1348	if (!mci) {
   1349		edac_printk(KERN_ERR, EDAC_MC,
   1350			    "Failed memory allocation for mc instance\n");
   1351		return -ENOMEM;
   1352	}
   1353
   1354	priv = mci->pvt_info;
   1355	priv->baseaddr = baseaddr;
   1356	priv->p_data = p_data;
   1357
   1358	mc_init(mci, pdev);
   1359
   1360	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
   1361		rc = setup_irq(mci, pdev);
   1362		if (rc)
   1363			goto free_edac_mc;
   1364	}
   1365
   1366	rc = edac_mc_add_mc(mci);
   1367	if (rc) {
   1368		edac_printk(KERN_ERR, EDAC_MC,
   1369			    "Failed to register with EDAC core\n");
   1370		goto free_edac_mc;
   1371	}
   1372
   1373#ifdef CONFIG_EDAC_DEBUG
   1374	if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) {
   1375		rc = edac_create_sysfs_attributes(mci);
   1376		if (rc) {
   1377			edac_printk(KERN_ERR, EDAC_MC,
   1378					"Failed to create sysfs entries\n");
   1379			goto free_edac_mc;
   1380		}
   1381	}
   1382
   1383	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
   1384		setup_address_map(priv);
   1385#endif
   1386
   1387	/*
   1388	 * Start capturing the correctable and uncorrectable errors. A write of
   1389	 * 0 starts the counters.
   1390	 */
   1391	if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT))
   1392		writel(0x0, baseaddr + ECC_CTRL_OFST);
   1393
   1394	return rc;
   1395
   1396free_edac_mc:
   1397	edac_mc_free(mci);
   1398
   1399	return rc;
   1400}
   1401
   1402/**
   1403 * mc_remove - Unbind driver from controller.
   1404 * @pdev:	Platform device.
   1405 *
   1406 * Return: Unconditionally 0
   1407 */
   1408static int mc_remove(struct platform_device *pdev)
   1409{
   1410	struct mem_ctl_info *mci = platform_get_drvdata(pdev);
   1411	struct synps_edac_priv *priv = mci->pvt_info;
   1412
   1413	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
   1414		disable_intr(priv);
   1415
   1416#ifdef CONFIG_EDAC_DEBUG
   1417	if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT)
   1418		edac_remove_sysfs_attributes(mci);
   1419#endif
   1420
   1421	edac_mc_del_mc(&pdev->dev);
   1422	edac_mc_free(mci);
   1423
   1424	return 0;
   1425}
   1426
   1427static struct platform_driver synps_edac_mc_driver = {
   1428	.driver = {
   1429		   .name = "synopsys-edac",
   1430		   .of_match_table = synps_edac_match,
   1431		   },
   1432	.probe = mc_probe,
   1433	.remove = mc_remove,
   1434};
   1435
   1436module_platform_driver(synps_edac_mc_driver);
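
/*
 * Module usage illustration (not part of the original source), assuming the
 * driver is built as a module (CONFIG_EDAC_SYNOPSYS=m); the mc index depends
 * on the system:
 *
 *   modprobe synopsys_edac
 *   cat /sys/devices/system/edac/mc/mc0/ce_count
 *   cat /sys/devices/system/edac/mc/mc0/ue_count
 */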
   1437
   1438MODULE_AUTHOR("Xilinx Inc");
   1439MODULE_DESCRIPTION("Synopsys DDR ECC driver");
   1440MODULE_LICENSE("GPL v2");