cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

renesas-nand-controller.c (39434B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Evatronix/Renesas R-Car Gen3, RZ/N1D, RZ/N1S, RZ/N1L NAND controller driver
      4 *
      5 * Copyright (C) 2021 Schneider Electric
      6 * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
      7 */
      8
      9#include <linux/bitfield.h>
     10#include <linux/clk.h>
     11#include <linux/dma-mapping.h>
     12#include <linux/interrupt.h>
     13#include <linux/iopoll.h>
     14#include <linux/module.h>
     15#include <linux/mtd/mtd.h>
     16#include <linux/mtd/rawnand.h>
     17#include <linux/of.h>
     18#include <linux/platform_device.h>
     19#include <linux/pm_runtime.h>
     20#include <linux/slab.h>
     21
/*
 * Register map of the NAND controller.
 *
 * Convention in this block: a flush-left define is a register offset,
 * a "  "-indented define is a field of the register right above it, and a
 * "    "-indented define is an enumerated value for that field.
 */

/* Command register: sequence selection, data path selection, up to 3 opcodes */
#define COMMAND_REG 0x00
#define   COMMAND_SEQ(x) FIELD_PREP(GENMASK(5, 0), (x))
#define     COMMAND_SEQ_10 COMMAND_SEQ(0x2A)
#define     COMMAND_SEQ_12 COMMAND_SEQ(0x0C)
#define     COMMAND_SEQ_18 COMMAND_SEQ(0x32)
#define     COMMAND_SEQ_19 COMMAND_SEQ(0x13)
#define     COMMAND_SEQ_GEN_IN COMMAND_SEQ_18
#define     COMMAND_SEQ_GEN_OUT COMMAND_SEQ_19
#define     COMMAND_SEQ_READ_PAGE COMMAND_SEQ_10
#define     COMMAND_SEQ_WRITE_PAGE COMMAND_SEQ_12
#define   COMMAND_INPUT_SEL_AHBS 0
#define   COMMAND_INPUT_SEL_DMA BIT(6)
#define   COMMAND_FIFO_SEL 0
#define   COMMAND_DATA_SEL BIT(7)
#define   COMMAND_0(x) FIELD_PREP(GENMASK(15, 8), (x))
#define   COMMAND_1(x) FIELD_PREP(GENMASK(23, 16), (x))
#define   COMMAND_2(x) FIELD_PREP(GENMASK(31, 24), (x))

/* Global control: ECC geometry/enable, interrupt enable, block size */
#define CONTROL_REG 0x04
#define   CONTROL_CHECK_RB_LINE 0
#define   CONTROL_ECC_BLOCK_SIZE(x) FIELD_PREP(GENMASK(2, 1), (x))
#define     CONTROL_ECC_BLOCK_SIZE_256 CONTROL_ECC_BLOCK_SIZE(0)
#define     CONTROL_ECC_BLOCK_SIZE_512 CONTROL_ECC_BLOCK_SIZE(1)
#define     CONTROL_ECC_BLOCK_SIZE_1024 CONTROL_ECC_BLOCK_SIZE(2)
#define   CONTROL_INT_EN BIT(4)
#define   CONTROL_ECC_EN BIT(5)
#define   CONTROL_BLOCK_SIZE(x) FIELD_PREP(GENMASK(7, 6), (x))
#define     CONTROL_BLOCK_SIZE_32P CONTROL_BLOCK_SIZE(0)
#define     CONTROL_BLOCK_SIZE_64P CONTROL_BLOCK_SIZE(1)
#define     CONTROL_BLOCK_SIZE_128P CONTROL_BLOCK_SIZE(2)
#define     CONTROL_BLOCK_SIZE_256P CONTROL_BLOCK_SIZE(3)

/* Live status: per-CS memory ready bits [3:0], controller busy bit [8] */
#define STATUS_REG 0x8
#define   MEM_RDY(cs, reg) (FIELD_GET(GENMASK(3, 0), (reg)) & BIT(cs))
#define   CTRL_RDY(reg) (FIELD_GET(BIT(8), (reg)) == 0)

#define ECC_CTRL_REG 0x18
#define   ECC_CTRL_CAP(x) FIELD_PREP(GENMASK(2, 0), (x))
#define     ECC_CTRL_CAP_2B ECC_CTRL_CAP(0)
#define     ECC_CTRL_CAP_4B ECC_CTRL_CAP(1)
#define     ECC_CTRL_CAP_8B ECC_CTRL_CAP(2)
#define     ECC_CTRL_CAP_16B ECC_CTRL_CAP(3)
#define     ECC_CTRL_CAP_24B ECC_CTRL_CAP(4)
#define     ECC_CTRL_CAP_32B ECC_CTRL_CAP(5)
#define   ECC_CTRL_ERR_THRESHOLD(x) FIELD_PREP(GENMASK(13, 8), (x))

/*
 * Interrupt mask/status. NOTE(review): INT_DMA_END and INT_DMA_ENDED are
 * duplicate names for BIT(3) — candidates for consolidation.
 */
#define INT_MASK_REG 0x10
#define INT_STATUS_REG 0x14
#define   INT_CMD_END BIT(1)
#define   INT_DMA_END BIT(3)
#define   INT_MEM_RDY(cs) FIELD_PREP(GENMASK(11, 8), BIT(cs))
#define   INT_DMA_ENDED BIT(3)
#define   MEM_IS_RDY(cs, reg) (FIELD_GET(GENMASK(11, 8), (reg)) & BIT(cs))
#define   DMA_HAS_ENDED(reg) FIELD_GET(BIT(3), (reg))

/* Byte offset of the ECC bytes within the page (spare area location) */
#define ECC_OFFSET_REG 0x1C
#define   ECC_OFFSET(x) FIELD_PREP(GENMASK(15, 0), (x))

/* Per-CS correctable [3:0] / uncorrectable [11:8] error flags */
#define ECC_STAT_REG 0x20
#define   ECC_STAT_CORRECTABLE(cs, reg) (FIELD_GET(GENMASK(3, 0), (reg)) & BIT(cs))
#define   ECC_STAT_UNCORRECTABLE(cs, reg) (FIELD_GET(GENMASK(11, 8), (reg)) & BIT(cs))

/* Two column/row address pairs, used by the generic sequencer */
#define ADDR0_COL_REG 0x24
#define   ADDR0_COL(x) FIELD_PREP(GENMASK(15, 0), (x))

#define ADDR0_ROW_REG 0x28
#define   ADDR0_ROW(x) FIELD_PREP(GENMASK(23, 0), (x))

#define ADDR1_COL_REG 0x2C
#define   ADDR1_COL(x) FIELD_PREP(GENMASK(15, 0), (x))

#define ADDR1_ROW_REG 0x30
#define   ADDR1_ROW(x) FIELD_PREP(GENMASK(23, 0), (x))

/* 32-bit window into the controller FIFO (AHBS/PIO transfers) */
#define FIFO_DATA_REG 0x38

#define DATA_REG 0x3C

#define DATA_REG_SIZE_REG 0x40

/* Internal DMA engine programming */
#define DMA_ADDR_LOW_REG 0x64

#define DMA_ADDR_HIGH_REG 0x68

#define DMA_CNT_REG 0x6C

#define DMA_CTRL_REG 0x70
#define   DMA_CTRL_INCREMENT_BURST_4 0
#define   DMA_CTRL_REGISTER_MANAGED_MODE 0
#define   DMA_CTRL_START BIT(7)

/* CS selection [1:0] and per-CS write-protect disable [11:8] */
#define MEM_CTRL_REG 0x80
#define   MEM_CTRL_CS(cs) FIELD_PREP(GENMASK(1, 0), (cs))
#define   MEM_CTRL_DIS_WP(cs) FIELD_PREP(GENMASK(11, 8), BIT((cs)))

#define DATA_SIZE_REG 0x84
#define   DATA_SIZE(x) FIELD_PREP(GENMASK(14, 0), (x))

/*
 * Timing registers: fields hold (cycles - 1), so the macros clamp the
 * requested cycle count to at least 1 before subtracting.
 */
#define TIMINGS_ASYN_REG 0x88
#define   TIMINGS_ASYN_TRWP(x) FIELD_PREP(GENMASK(3, 0), max((x), 1U) - 1)
#define   TIMINGS_ASYN_TRWH(x) FIELD_PREP(GENMASK(7, 4), max((x), 1U) - 1)

#define TIM_SEQ0_REG 0x90
#define   TIM_SEQ0_TCCS(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define   TIM_SEQ0_TADL(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define   TIM_SEQ0_TRHW(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define   TIM_SEQ0_TWHR(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)

#define TIM_SEQ1_REG 0x94
#define   TIM_SEQ1_TWB(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define   TIM_SEQ1_TRR(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define   TIM_SEQ1_TWW(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)

#define TIM_GEN_SEQ0_REG 0x98
#define   TIM_GEN_SEQ0_D0(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define   TIM_GEN_SEQ0_D1(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define   TIM_GEN_SEQ0_D2(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define   TIM_GEN_SEQ0_D3(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)

#define TIM_GEN_SEQ1_REG 0x9c
#define   TIM_GEN_SEQ1_D4(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define   TIM_GEN_SEQ1_D5(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define   TIM_GEN_SEQ1_D6(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define   TIM_GEN_SEQ1_D7(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)

#define TIM_GEN_SEQ2_REG 0xA0
#define   TIM_GEN_SEQ2_D8(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define   TIM_GEN_SEQ2_D9(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define   TIM_GEN_SEQ2_D10(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define   TIM_GEN_SEQ2_D11(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)

/* FIFO init (write) and state (read) share the same offset 0xB4 */
#define FIFO_INIT_REG 0xB4
#define   FIFO_INIT BIT(0)

#define FIFO_STATE_REG 0xB4
#define   FIFO_STATE_R_EMPTY(reg) FIELD_GET(BIT(0), (reg))
#define   FIFO_STATE_W_FULL(reg) FIELD_GET(BIT(1), (reg))
#define   FIFO_STATE_C_EMPTY(reg) FIELD_GET(BIT(2), (reg))
#define   FIFO_STATE_R_FULL(reg) FIELD_GET(BIT(6), (reg))
#define   FIFO_STATE_W_EMPTY(reg) FIELD_GET(BIT(7), (reg))

/* Generic sequence description: enabled phases, address byte counts, delays */
#define GEN_SEQ_CTRL_REG 0xB8
#define   GEN_SEQ_CMD0_EN BIT(0)
#define   GEN_SEQ_CMD1_EN BIT(1)
#define   GEN_SEQ_CMD2_EN BIT(2)
#define   GEN_SEQ_CMD3_EN BIT(3)
#define   GEN_SEQ_COL_A0(x) FIELD_PREP(GENMASK(5, 4), min((x), 2U))
#define   GEN_SEQ_COL_A1(x) FIELD_PREP(GENMASK(7, 6), min((x), 2U))
#define   GEN_SEQ_ROW_A0(x) FIELD_PREP(GENMASK(9, 8), min((x), 3U))
#define   GEN_SEQ_ROW_A1(x) FIELD_PREP(GENMASK(11, 10), min((x), 3U))
#define   GEN_SEQ_DATA_EN BIT(12)
#define   GEN_SEQ_DELAY_EN(x) FIELD_PREP(GENMASK(14, 13), (x))
#define     GEN_SEQ_DELAY0_EN GEN_SEQ_DELAY_EN(1)
#define     GEN_SEQ_DELAY1_EN GEN_SEQ_DELAY_EN(2)
#define   GEN_SEQ_IMD_SEQ BIT(15)
#define   GEN_SEQ_COMMAND_3(x) FIELD_PREP(GENMASK(26, 16), (x))

/* DMA FIFO trigger level */
#define DMA_TLVL_REG 0x114
#define   DMA_TLVL(x) FIELD_PREP(GENMASK(7, 0), (x))
#define   DMA_TLVL_MAX DMA_TLVL(0xFF)

#define TIM_GEN_SEQ3_REG 0x134
#define   TIM_GEN_SEQ3_D12(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)

/* Per-CS corrected-bitflip counter, 8 bits per CS, 6 significant bits */
#define ECC_CNT_REG 0x14C
#define   ECC_CNT(cs, reg) FIELD_GET(GENMASK(5, 0), (reg) >> ((cs) * 8))

/* Number of chip-select lines wired to the controller */
#define RNANDC_CS_NUM 4

/* Convert a picosecond timing into clock cycles at the given period (ns) */
#define TO_CYCLES64(ps, period_ns) ((unsigned int)DIV_ROUND_UP_ULL(div_u64(ps, 1000), \
								   period_ns))
/* One chip-select line assignment for a NAND chip */
struct rnand_chip_sel {
	unsigned int cs;	/* Controller CS index, 0..RNANDC_CS_NUM-1 */
};
    197
/*
 * Per-chip state: the cached register values that rnandc_select_target()
 * programs into the controller whenever this chip/die becomes active,
 * plus the list of chip-select lines the chip is wired to.
 */
struct rnand_chip {
	struct nand_chip chip;		/* Base NAND chip object */
	struct list_head node;		/* Entry in the controller's chip list */
	int selected_die;		/* Die currently selected on this chip */
	u32 ctrl;			/* NOTE(review): not referenced in this view; 'control' below is what gets programmed — confirm against full file */
	unsigned int nsels;		/* Number of entries in sels[] */
	u32 control;			/* Cached CONTROL_REG value */
	u32 ecc_ctrl;			/* Cached ECC_CTRL_REG value */
	u32 timings_asyn;		/* Cached TIMINGS_ASYN_REG value */
	u32 tim_seq0;			/* Cached TIM_SEQ0_REG value */
	u32 tim_seq1;			/* Cached TIM_SEQ1_REG value */
	u32 tim_gen_seq0;		/* Cached TIM_GEN_SEQ0_REG value */
	u32 tim_gen_seq1;		/* Cached TIM_GEN_SEQ1_REG value */
	u32 tim_gen_seq2;		/* Cached TIM_GEN_SEQ2_REG value */
	u32 tim_gen_seq3;		/* Cached TIM_GEN_SEQ3_REG value */
	struct rnand_chip_sel sels[];	/* nsels chip-select descriptors */
};
    215
/* Controller-wide state, one instance per NAND controller */
struct rnandc {
	struct nand_controller controller;	/* Base controller (container_of anchor) */
	struct device *dev;
	void __iomem *regs;			/* Mapped register base */
	unsigned long ext_clk_rate;		/* External bus clock rate (Hz), used to derive timing cycles */
	unsigned long assigned_cs;		/* Bitmap of CS lines already claimed by chips */
	struct list_head chips;			/* All rnand_chip instances attached */
	struct nand_chip *selected_chip;	/* Chip whose settings are currently live in HW */
	struct completion complete;		/* Completed by the IRQ handler */
	bool use_polling;			/* True when no IRQ is wired: poll status registers instead */
	u8 *buf;				/* DMA bounce buffer for full-page transfers */
	unsigned int buf_sz;			/* Size of @buf in bytes */
};
    229
/*
 * Description of one controller operation; each field maps directly to the
 * register of the same name written by rnandc_trigger_op().
 */
struct rnandc_op {
	u32 command;		/* COMMAND_REG value (written last, starts the op) */
	u32 addr0_col;		/* ADDR0_COL_REG value */
	u32 addr0_row;		/* ADDR0_ROW_REG value */
	u32 addr1_col;		/* ADDR1_COL_REG value */
	u32 addr1_row;		/* ADDR1_ROW_REG value */
	u32 data_size;		/* NOTE(review): not written by rnandc_trigger_op(), which derives DATA_SIZE from 'len' instead */
	u32 ecc_offset;		/* ECC_OFFSET_REG value */
	u32 gen_seq_ctrl;	/* GEN_SEQ_CTRL_REG value */
	u8 *buf;		/* PIO data buffer (exec_op path), NULL if none */
	bool read;		/* True for data-in, false for data-out */
	unsigned int len;	/* Data length in bytes */
};
    243
/* Retrieve the driver's controller state from the generic controller. */
static inline struct rnandc *to_rnandc(struct nand_controller *ctrl)
{
	return container_of(ctrl, struct rnandc, controller);
}
    248
/* Retrieve the driver's per-chip state from the generic NAND chip. */
static inline struct rnand_chip *to_rnand(struct nand_chip *chip)
{
	return container_of(chip, struct rnand_chip, chip);
}
    253
/* Chip-select line of the currently selected die of @nand. */
static inline unsigned int to_rnandc_cs(struct rnand_chip *nand)
{
	return nand->sels[nand->selected_die].cs;
}
    258
    259static void rnandc_dis_correction(struct rnandc *rnandc)
    260{
    261	u32 control;
    262
    263	control = readl_relaxed(rnandc->regs + CONTROL_REG);
    264	control &= ~CONTROL_ECC_EN;
    265	writel_relaxed(control, rnandc->regs + CONTROL_REG);
    266}
    267
    268static void rnandc_en_correction(struct rnandc *rnandc)
    269{
    270	u32 control;
    271
    272	control = readl_relaxed(rnandc->regs + CONTROL_REG);
    273	control |= CONTROL_ECC_EN;
    274	writel_relaxed(control, rnandc->regs + CONTROL_REG);
    275}
    276
/* Reset interrupt status and the ECC error/counter registers before an op. */
static void rnandc_clear_status(struct rnandc *rnandc)
{
	writel_relaxed(0, rnandc->regs + INT_STATUS_REG);
	writel_relaxed(0, rnandc->regs + ECC_STAT_REG);
	writel_relaxed(0, rnandc->regs + ECC_CNT_REG);
}
    283
/* Mask all interrupt sources. */
static void rnandc_dis_interrupts(struct rnandc *rnandc)
{
	writel_relaxed(0, rnandc->regs + INT_MASK_REG);
}
    288
    289static void rnandc_en_interrupts(struct rnandc *rnandc, u32 val)
    290{
    291	if (!rnandc->use_polling)
    292		writel_relaxed(val, rnandc->regs + INT_MASK_REG);
    293}
    294
/* Flush any residual content out of the controller FIFO. */
static void rnandc_clear_fifo(struct rnandc *rnandc)
{
	writel_relaxed(FIFO_INIT, rnandc->regs + FIFO_INIT_REG);
}
    299
/*
 * Make @die_nr of @chip the active target: select its CS line, disable write
 * protect on that CS, and program the chip's cached control/ECC/timing
 * registers. Skipped entirely when the requested chip+die is already live,
 * so back-to-back operations on the same target avoid the reprogramming.
 */
static void rnandc_select_target(struct nand_chip *chip, int die_nr)
{
	struct rnand_chip *rnand = to_rnand(chip);
	struct rnandc *rnandc = to_rnandc(chip->controller);
	unsigned int cs = rnand->sels[die_nr].cs;

	if (chip == rnandc->selected_chip && die_nr == rnand->selected_die)
		return;

	rnandc_clear_status(rnandc);
	writel_relaxed(MEM_CTRL_CS(cs) | MEM_CTRL_DIS_WP(cs), rnandc->regs + MEM_CTRL_REG);
	writel_relaxed(rnand->control, rnandc->regs + CONTROL_REG);
	writel_relaxed(rnand->ecc_ctrl, rnandc->regs + ECC_CTRL_REG);
	writel_relaxed(rnand->timings_asyn, rnandc->regs + TIMINGS_ASYN_REG);
	writel_relaxed(rnand->tim_seq0, rnandc->regs + TIM_SEQ0_REG);
	writel_relaxed(rnand->tim_seq1, rnandc->regs + TIM_SEQ1_REG);
	writel_relaxed(rnand->tim_gen_seq0, rnandc->regs + TIM_GEN_SEQ0_REG);
	writel_relaxed(rnand->tim_gen_seq1, rnandc->regs + TIM_GEN_SEQ1_REG);
	writel_relaxed(rnand->tim_gen_seq2, rnandc->regs + TIM_GEN_SEQ2_REG);
	writel_relaxed(rnand->tim_gen_seq3, rnandc->regs + TIM_GEN_SEQ3_REG);

	rnandc->selected_chip = chip;
	rnand->selected_die = die_nr;
}
    324
/*
 * Program one operation into the controller: addresses, ECC offset, generic
 * sequence description and data length. COMMAND_REG is deliberately written
 * last — all callers expect the operation to be running once this returns.
 */
static void rnandc_trigger_op(struct rnandc *rnandc, struct rnandc_op *rop)
{
	writel_relaxed(rop->addr0_col, rnandc->regs + ADDR0_COL_REG);
	writel_relaxed(rop->addr0_row, rnandc->regs + ADDR0_ROW_REG);
	writel_relaxed(rop->addr1_col, rnandc->regs + ADDR1_COL_REG);
	writel_relaxed(rop->addr1_row, rnandc->regs + ADDR1_ROW_REG);
	writel_relaxed(rop->ecc_offset, rnandc->regs + ECC_OFFSET_REG);
	writel_relaxed(rop->gen_seq_ctrl, rnandc->regs + GEN_SEQ_CTRL_REG);
	writel_relaxed(DATA_SIZE(rop->len), rnandc->regs + DATA_SIZE_REG);
	writel_relaxed(rop->command, rnandc->regs + COMMAND_REG);
}
    336
    337static void rnandc_trigger_dma(struct rnandc *rnandc)
    338{
    339	writel_relaxed(DMA_CTRL_INCREMENT_BURST_4 |
    340		       DMA_CTRL_REGISTER_MANAGED_MODE |
    341		       DMA_CTRL_START, rnandc->regs + DMA_CTRL_REG);
    342}
    343
/*
 * Interrupt handler: mask further interrupts and wake the waiter.
 * INT_STATUS_REG itself is not acknowledged here; it is zeroed by
 * rnandc_clear_status() before the next operation.
 */
static irqreturn_t rnandc_irq_handler(int irq, void *private)
{
	struct rnandc *rnandc = private;

	rnandc_dis_interrupts(rnandc);
	complete(&rnandc->complete);

	return IRQ_HANDLED;
}
    353
/*
 * Busy-wait (up to 100ms) until both the selected memory and the controller
 * report ready in STATUS_REG. Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int rnandc_wait_end_of_op(struct rnandc *rnandc,
				 struct nand_chip *chip)
{
	struct rnand_chip *rnand = to_rnand(chip);
	unsigned int cs = to_rnandc_cs(rnand);
	u32 status;
	int ret;

	ret = readl_poll_timeout(rnandc->regs + STATUS_REG, status,
				 MEM_RDY(cs, status) && CTRL_RDY(status),
				 1, 100000);
	if (ret)
		dev_err(rnandc->dev, "Operation timed out, status: 0x%08x\n",
			status);

	return ret;
}
    371
    372static int rnandc_wait_end_of_io(struct rnandc *rnandc,
    373				 struct nand_chip *chip)
    374{
    375	int timeout_ms = 1000;
    376	int ret;
    377
    378	if (rnandc->use_polling) {
    379		struct rnand_chip *rnand = to_rnand(chip);
    380		unsigned int cs = to_rnandc_cs(rnand);
    381		u32 status;
    382
    383		ret = readl_poll_timeout(rnandc->regs + INT_STATUS_REG, status,
    384					 MEM_IS_RDY(cs, status) &
    385					 DMA_HAS_ENDED(status),
    386					 0, timeout_ms * 1000);
    387	} else {
    388		ret = wait_for_completion_timeout(&rnandc->complete,
    389						  msecs_to_jiffies(timeout_ms));
    390		if (!ret)
    391			ret = -ETIMEDOUT;
    392		else
    393			ret = 0;
    394	}
    395
    396	return ret;
    397}
    398
    399static int rnandc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
    400				   int oob_required, int page)
    401{
    402	struct rnandc *rnandc = to_rnandc(chip->controller);
    403	struct mtd_info *mtd = nand_to_mtd(chip);
    404	struct rnand_chip *rnand = to_rnand(chip);
    405	unsigned int cs = to_rnandc_cs(rnand);
    406	struct rnandc_op rop = {
    407		.command = COMMAND_INPUT_SEL_DMA | COMMAND_0(NAND_CMD_READ0) |
    408			   COMMAND_2(NAND_CMD_READSTART) | COMMAND_FIFO_SEL |
    409			   COMMAND_SEQ_READ_PAGE,
    410		.addr0_row = page,
    411		.len = mtd->writesize,
    412		.ecc_offset = ECC_OFFSET(mtd->writesize + 2),
    413	};
    414	unsigned int max_bitflips = 0;
    415	dma_addr_t dma_addr;
    416	u32 ecc_stat;
    417	int bf, ret, i;
    418
    419	/* Prepare controller */
    420	rnandc_select_target(chip, chip->cur_cs);
    421	rnandc_clear_status(rnandc);
    422	reinit_completion(&rnandc->complete);
    423	rnandc_en_interrupts(rnandc, INT_DMA_ENDED);
    424	rnandc_en_correction(rnandc);
    425
    426	/* Configure DMA */
    427	dma_addr = dma_map_single(rnandc->dev, rnandc->buf, mtd->writesize,
    428				  DMA_FROM_DEVICE);
    429	writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
    430	writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
    431	writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
    432
    433	rnandc_trigger_op(rnandc, &rop);
    434	rnandc_trigger_dma(rnandc);
    435
    436	ret = rnandc_wait_end_of_io(rnandc, chip);
    437	dma_unmap_single(rnandc->dev, dma_addr, mtd->writesize, DMA_FROM_DEVICE);
    438	rnandc_dis_correction(rnandc);
    439	if (ret) {
    440		dev_err(rnandc->dev, "Read page operation never ending\n");
    441		return ret;
    442	}
    443
    444	ecc_stat = readl_relaxed(rnandc->regs + ECC_STAT_REG);
    445
    446	if (oob_required || ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
    447		ret = nand_change_read_column_op(chip, mtd->writesize,
    448						 chip->oob_poi, mtd->oobsize,
    449						 false);
    450		if (ret)
    451			return ret;
    452	}
    453
    454	if (ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
    455		for (i = 0; i < chip->ecc.steps; i++) {
    456			unsigned int off = i * chip->ecc.size;
    457			unsigned int eccoff = i * chip->ecc.bytes;
    458
    459			bf = nand_check_erased_ecc_chunk(rnandc->buf + off,
    460							 chip->ecc.size,
    461							 chip->oob_poi + 2 + eccoff,
    462							 chip->ecc.bytes,
    463							 NULL, 0,
    464							 chip->ecc.strength);
    465			if (bf < 0) {
    466				mtd->ecc_stats.failed++;
    467			} else {
    468				mtd->ecc_stats.corrected += bf;
    469				max_bitflips = max_t(unsigned int, max_bitflips, bf);
    470			}
    471		}
    472	} else if (ECC_STAT_CORRECTABLE(cs, ecc_stat)) {
    473		bf = ECC_CNT(cs, readl_relaxed(rnandc->regs + ECC_CNT_REG));
    474		/*
    475		 * The number of bitflips is an approximation given the fact
    476		 * that this controller does not provide per-chunk details but
    477		 * only gives statistics on the entire page.
    478		 */
    479		mtd->ecc_stats.corrected += bf;
    480	}
    481
    482	memcpy(buf, rnandc->buf, mtd->writesize);
    483
    484	return 0;
    485}
    486
    487static int rnandc_read_subpage_hw_ecc(struct nand_chip *chip, u32 req_offset,
    488				      u32 req_len, u8 *bufpoi, int page)
    489{
    490	struct rnandc *rnandc = to_rnandc(chip->controller);
    491	struct mtd_info *mtd = nand_to_mtd(chip);
    492	struct rnand_chip *rnand = to_rnand(chip);
    493	unsigned int cs = to_rnandc_cs(rnand);
    494	unsigned int page_off = round_down(req_offset, chip->ecc.size);
    495	unsigned int real_len = round_up(req_offset + req_len - page_off,
    496					 chip->ecc.size);
    497	unsigned int start_chunk = page_off / chip->ecc.size;
    498	unsigned int nchunks = real_len / chip->ecc.size;
    499	unsigned int ecc_off = 2 + (start_chunk * chip->ecc.bytes);
    500	struct rnandc_op rop = {
    501		.command = COMMAND_INPUT_SEL_AHBS | COMMAND_0(NAND_CMD_READ0) |
    502			   COMMAND_2(NAND_CMD_READSTART) | COMMAND_FIFO_SEL |
    503			   COMMAND_SEQ_READ_PAGE,
    504		.addr0_row = page,
    505		.addr0_col = page_off,
    506		.len = real_len,
    507		.ecc_offset = ECC_OFFSET(mtd->writesize + ecc_off),
    508	};
    509	unsigned int max_bitflips = 0, i;
    510	u32 ecc_stat;
    511	int bf, ret;
    512
    513	/* Prepare controller */
    514	rnandc_select_target(chip, chip->cur_cs);
    515	rnandc_clear_status(rnandc);
    516	rnandc_en_correction(rnandc);
    517	rnandc_trigger_op(rnandc, &rop);
    518
    519	while (!FIFO_STATE_C_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
    520		cpu_relax();
    521
    522	while (FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
    523		cpu_relax();
    524
    525	ioread32_rep(rnandc->regs + FIFO_DATA_REG, bufpoi + page_off,
    526		     real_len / 4);
    527
    528	if (!FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG))) {
    529		dev_err(rnandc->dev, "Clearing residual data in the read FIFO\n");
    530		rnandc_clear_fifo(rnandc);
    531	}
    532
    533	ret = rnandc_wait_end_of_op(rnandc, chip);
    534	rnandc_dis_correction(rnandc);
    535	if (ret) {
    536		dev_err(rnandc->dev, "Read subpage operation never ending\n");
    537		return ret;
    538	}
    539
    540	ecc_stat = readl_relaxed(rnandc->regs + ECC_STAT_REG);
    541
    542	if (ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
    543		ret = nand_change_read_column_op(chip, mtd->writesize,
    544						 chip->oob_poi, mtd->oobsize,
    545						 false);
    546		if (ret)
    547			return ret;
    548
    549		for (i = start_chunk; i < nchunks; i++) {
    550			unsigned int dataoff = i * chip->ecc.size;
    551			unsigned int eccoff = 2 + (i * chip->ecc.bytes);
    552
    553			bf = nand_check_erased_ecc_chunk(bufpoi + dataoff,
    554							 chip->ecc.size,
    555							 chip->oob_poi + eccoff,
    556							 chip->ecc.bytes,
    557							 NULL, 0,
    558							 chip->ecc.strength);
    559			if (bf < 0) {
    560				mtd->ecc_stats.failed++;
    561			} else {
    562				mtd->ecc_stats.corrected += bf;
    563				max_bitflips = max_t(unsigned int, max_bitflips, bf);
    564			}
    565		}
    566	} else if (ECC_STAT_CORRECTABLE(cs, ecc_stat)) {
    567		bf = ECC_CNT(cs, readl_relaxed(rnandc->regs + ECC_CNT_REG));
    568		/*
    569		 * The number of bitflips is an approximation given the fact
    570		 * that this controller does not provide per-chunk details but
    571		 * only gives statistics on the entire page.
    572		 */
    573		mtd->ecc_stats.corrected += bf;
    574	}
    575
    576	return 0;
    577}
    578
    579static int rnandc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
    580				    int oob_required, int page)
    581{
    582	struct rnandc *rnandc = to_rnandc(chip->controller);
    583	struct mtd_info *mtd = nand_to_mtd(chip);
    584	struct rnand_chip *rnand = to_rnand(chip);
    585	unsigned int cs = to_rnandc_cs(rnand);
    586	struct rnandc_op rop = {
    587		.command = COMMAND_INPUT_SEL_DMA | COMMAND_0(NAND_CMD_SEQIN) |
    588			   COMMAND_1(NAND_CMD_PAGEPROG) | COMMAND_FIFO_SEL |
    589			   COMMAND_SEQ_WRITE_PAGE,
    590		.addr0_row = page,
    591		.len = mtd->writesize,
    592		.ecc_offset = ECC_OFFSET(mtd->writesize + 2),
    593	};
    594	dma_addr_t dma_addr;
    595	int ret;
    596
    597	memcpy(rnandc->buf, buf, mtd->writesize);
    598
    599	/* Prepare controller */
    600	rnandc_select_target(chip, chip->cur_cs);
    601	rnandc_clear_status(rnandc);
    602	reinit_completion(&rnandc->complete);
    603	rnandc_en_interrupts(rnandc, INT_MEM_RDY(cs));
    604	rnandc_en_correction(rnandc);
    605
    606	/* Configure DMA */
    607	dma_addr = dma_map_single(rnandc->dev, (void *)rnandc->buf, mtd->writesize,
    608				  DMA_TO_DEVICE);
    609	writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
    610	writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
    611	writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
    612
    613	rnandc_trigger_op(rnandc, &rop);
    614	rnandc_trigger_dma(rnandc);
    615
    616	ret = rnandc_wait_end_of_io(rnandc, chip);
    617	dma_unmap_single(rnandc->dev, dma_addr, mtd->writesize, DMA_TO_DEVICE);
    618	rnandc_dis_correction(rnandc);
    619	if (ret) {
    620		dev_err(rnandc->dev, "Write page operation never ending\n");
    621		return ret;
    622	}
    623
    624	if (!oob_required)
    625		return 0;
    626
    627	return nand_change_write_column_op(chip, mtd->writesize, chip->oob_poi,
    628					   mtd->oobsize, false);
    629}
    630
/*
 * Program part of a page (rounded to whole ECC chunks) with hardware ECC,
 * using PIO through the controller FIFO. @oob_required is accepted for the
 * ->write_subpage() prototype but not acted upon here.
 *
 * Returns 0 on success or a negative errno.
 */
static int rnandc_write_subpage_hw_ecc(struct nand_chip *chip, u32 req_offset,
				       u32 req_len, const u8 *bufpoi,
				       int oob_required, int page)
{
	struct rnandc *rnandc = to_rnandc(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	/* Align the request down/up to full ECC chunk boundaries */
	unsigned int page_off = round_down(req_offset, chip->ecc.size);
	unsigned int real_len = round_up(req_offset + req_len - page_off,
					 chip->ecc.size);
	unsigned int start_chunk = page_off / chip->ecc.size;
	/* ECC bytes live at offset 2 in the OOB, ecc.bytes per chunk */
	unsigned int ecc_off = 2 + (start_chunk * chip->ecc.bytes);
	struct rnandc_op rop = {
		.command = COMMAND_INPUT_SEL_AHBS | COMMAND_0(NAND_CMD_SEQIN) |
			   COMMAND_1(NAND_CMD_PAGEPROG) | COMMAND_FIFO_SEL |
			   COMMAND_SEQ_WRITE_PAGE,
		.addr0_row = page,
		.addr0_col = page_off,
		.len = real_len,
		.ecc_offset = ECC_OFFSET(mtd->writesize + ecc_off),
	};
	int ret;

	/* Prepare controller */
	rnandc_select_target(chip, chip->cur_cs);
	rnandc_clear_status(rnandc);
	rnandc_en_correction(rnandc);
	rnandc_trigger_op(rnandc, &rop);

	/* Push the data, then wait until the write FIFO has fully drained */
	while (FIFO_STATE_W_FULL(readl(rnandc->regs + FIFO_STATE_REG)))
		cpu_relax();

	iowrite32_rep(rnandc->regs + FIFO_DATA_REG, bufpoi + page_off,
		      real_len / 4);

	while (!FIFO_STATE_W_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
		cpu_relax();

	ret = rnandc_wait_end_of_op(rnandc, chip);
	rnandc_dis_correction(rnandc);
	if (ret) {
		dev_err(rnandc->dev, "Write subpage operation never ending\n");
		return ret;
	}

	return 0;
}
    677
    678/*
    679 * This controller is simple enough and thus does not need to use the parser
    680 * provided by the core, instead, handle every situation here.
    681 */
    682static int rnandc_exec_op(struct nand_chip *chip,
    683			  const struct nand_operation *op, bool check_only)
    684{
    685	struct rnandc *rnandc = to_rnandc(chip->controller);
    686	const struct nand_op_instr *instr = NULL;
    687	struct rnandc_op rop = {
    688		.command = COMMAND_INPUT_SEL_AHBS,
    689		.gen_seq_ctrl = GEN_SEQ_IMD_SEQ,
    690	};
    691	unsigned int cmd_phase = 0, addr_phase = 0, data_phase = 0,
    692		delay_phase = 0, delays = 0;
    693	unsigned int op_id, col_addrs, row_addrs, naddrs, remainder, words, i;
    694	const u8 *addrs;
    695	u32 last_bytes;
    696	int ret;
    697
    698	if (!check_only)
    699		rnandc_select_target(chip, op->cs);
    700
    701	for (op_id = 0; op_id < op->ninstrs; op_id++) {
    702		instr = &op->instrs[op_id];
    703
    704		nand_op_trace("  ", instr);
    705
    706		switch (instr->type) {
    707		case NAND_OP_CMD_INSTR:
    708			switch (cmd_phase++) {
    709			case 0:
    710				rop.command |= COMMAND_0(instr->ctx.cmd.opcode);
    711				rop.gen_seq_ctrl |= GEN_SEQ_CMD0_EN;
    712				break;
    713			case 1:
    714				rop.gen_seq_ctrl |= GEN_SEQ_COMMAND_3(instr->ctx.cmd.opcode);
    715				rop.gen_seq_ctrl |= GEN_SEQ_CMD3_EN;
    716				if (addr_phase == 0)
    717					addr_phase = 1;
    718				break;
    719			case 2:
    720				rop.command |= COMMAND_2(instr->ctx.cmd.opcode);
    721				rop.gen_seq_ctrl |= GEN_SEQ_CMD2_EN;
    722				if (addr_phase <= 1)
    723					addr_phase = 2;
    724				break;
    725			case 3:
    726				rop.command |= COMMAND_1(instr->ctx.cmd.opcode);
    727				rop.gen_seq_ctrl |= GEN_SEQ_CMD1_EN;
    728				if (addr_phase <= 1)
    729					addr_phase = 2;
    730				if (delay_phase == 0)
    731					delay_phase = 1;
    732				if (data_phase == 0)
    733					data_phase = 1;
    734				break;
    735			default:
    736				return -EOPNOTSUPP;
    737			}
    738			break;
    739
    740		case NAND_OP_ADDR_INSTR:
    741			addrs = instr->ctx.addr.addrs;
    742			naddrs = instr->ctx.addr.naddrs;
    743			if (naddrs > 5)
    744				return -EOPNOTSUPP;
    745
    746			col_addrs = min(2U, naddrs);
    747			row_addrs = naddrs > 2 ? naddrs - col_addrs : 0;
    748
    749			switch (addr_phase++) {
    750			case 0:
    751				for (i = 0; i < col_addrs; i++)
    752					rop.addr0_col |= addrs[i] << (i * 8);
    753				rop.gen_seq_ctrl |= GEN_SEQ_COL_A0(col_addrs);
    754
    755				for (i = 0; i < row_addrs; i++)
    756					rop.addr0_row |= addrs[2 + i] << (i * 8);
    757				rop.gen_seq_ctrl |= GEN_SEQ_ROW_A0(row_addrs);
    758
    759				if (cmd_phase == 0)
    760					cmd_phase = 1;
    761				break;
    762			case 1:
    763				for (i = 0; i < col_addrs; i++)
    764					rop.addr1_col |= addrs[i] << (i * 8);
    765				rop.gen_seq_ctrl |= GEN_SEQ_COL_A1(col_addrs);
    766
    767				for (i = 0; i < row_addrs; i++)
    768					rop.addr1_row |= addrs[2 + i] << (i * 8);
    769				rop.gen_seq_ctrl |= GEN_SEQ_ROW_A1(row_addrs);
    770
    771				if (cmd_phase <= 1)
    772					cmd_phase = 2;
    773				break;
    774			default:
    775				return -EOPNOTSUPP;
    776			}
    777			break;
    778
    779		case NAND_OP_DATA_IN_INSTR:
    780			rop.read = true;
    781			fallthrough;
    782		case NAND_OP_DATA_OUT_INSTR:
    783			rop.gen_seq_ctrl |= GEN_SEQ_DATA_EN;
    784			rop.buf = instr->ctx.data.buf.in;
    785			rop.len = instr->ctx.data.len;
    786			rop.command |= COMMAND_FIFO_SEL;
    787
    788			switch (data_phase++) {
    789			case 0:
    790				if (cmd_phase <= 2)
    791					cmd_phase = 3;
    792				if (addr_phase <= 1)
    793					addr_phase = 2;
    794				if (delay_phase == 0)
    795					delay_phase = 1;
    796				break;
    797			default:
    798				return -EOPNOTSUPP;
    799			}
    800			break;
    801
    802		case NAND_OP_WAITRDY_INSTR:
    803			switch (delay_phase++) {
    804			case 0:
    805				rop.gen_seq_ctrl |= GEN_SEQ_DELAY0_EN;
    806
    807				if (cmd_phase <= 2)
    808					cmd_phase = 3;
    809				break;
    810			case 1:
    811				rop.gen_seq_ctrl |= GEN_SEQ_DELAY1_EN;
    812
    813				if (cmd_phase <= 3)
    814					cmd_phase = 4;
    815				if (data_phase == 0)
    816					data_phase = 1;
    817				break;
    818			default:
    819				return -EOPNOTSUPP;
    820			}
    821			break;
    822		}
    823	}
    824
    825	/*
    826	 * Sequence 19 is generic and dedicated to write operations.
    827	 * Sequence 18 is also generic and works for all other operations.
    828	 */
    829	if (rop.buf && !rop.read)
    830		rop.command |= COMMAND_SEQ_GEN_OUT;
    831	else
    832		rop.command |= COMMAND_SEQ_GEN_IN;
    833
    834	if (delays > 1) {
    835		dev_err(rnandc->dev, "Cannot handle more than one wait delay\n");
    836		return -EOPNOTSUPP;
    837	}
    838
    839	if (check_only)
    840		return 0;
    841
    842	rnandc_trigger_op(rnandc, &rop);
    843
    844	words = rop.len / sizeof(u32);
    845	remainder = rop.len % sizeof(u32);
    846	if (rop.buf && rop.read) {
    847		while (!FIFO_STATE_C_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
    848			cpu_relax();
    849
    850		while (FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
    851			cpu_relax();
    852
    853		ioread32_rep(rnandc->regs + FIFO_DATA_REG, rop.buf, words);
    854		if (remainder) {
    855			last_bytes = readl_relaxed(rnandc->regs + FIFO_DATA_REG);
    856			memcpy(rop.buf + (words * sizeof(u32)), &last_bytes,
    857			       remainder);
    858		}
    859
    860		if (!FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG))) {
    861			dev_warn(rnandc->dev,
    862				 "Clearing residual data in the read FIFO\n");
    863			rnandc_clear_fifo(rnandc);
    864		}
    865	} else if (rop.len && !rop.read) {
    866		while (FIFO_STATE_W_FULL(readl(rnandc->regs + FIFO_STATE_REG)))
    867			cpu_relax();
    868
    869		iowrite32_rep(rnandc->regs + FIFO_DATA_REG, rop.buf,
    870			      DIV_ROUND_UP(rop.len, 4));
    871
    872		if (remainder) {
    873			last_bytes = 0;
    874			memcpy(&last_bytes, rop.buf + (words * sizeof(u32)), remainder);
    875			writel_relaxed(last_bytes, rnandc->regs + FIFO_DATA_REG);
    876		}
    877
    878		while (!FIFO_STATE_W_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
    879			cpu_relax();
    880	}
    881
    882	ret = rnandc_wait_end_of_op(rnandc, chip);
    883	if (ret)
    884		return ret;
    885
    886	return 0;
    887}
    888
/*
 * Convert the SDR timings negotiated by the NAND core into the controller's
 * timing register values (TIMINGS_ASYN, TIM_SEQ0/1, TIM_GEN_SEQ0-3).
 *
 * The core first calls this with a negative @chipnr to only validate the
 * configuration; the computed values are then cached in @rnand and applied
 * later, when the die is actually selected.
 */
static int rnandc_setup_interface(struct nand_chip *chip, int chipnr,
				  const struct nand_interface_config *conf)
{
	struct rnand_chip *rnand = to_rnand(chip);
	struct rnandc *rnandc = to_rnandc(chip->controller);
	/*
	 * External NAND bus clock period, in ns. NOTE(review): assumes
	 * ext_clk_rate is non-zero and no faster than 1GHz — confirm it is
	 * validated at probe time.
	 */
	unsigned int period_ns = 1000000000 / rnandc->ext_clk_rate;
	const struct nand_sdr_timings *sdr;
	unsigned int cyc, cle, ale, bef_dly, ca_to_data;

	sdr = nand_get_sdr_timings(conf);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	/* A single TIMINGS_ASYN field encodes both read and write pulses */
	if (sdr->tRP_min != sdr->tWP_min || sdr->tREH_min != sdr->tWH_min) {
		dev_err(rnandc->dev, "Read and write hold times must be identical\n");
		return -EINVAL;
	}

	/* Check-only pass: configuration is valid, nothing to cache */
	if (chipnr < 0)
		return 0;

	rnand->timings_asyn =
		TIMINGS_ASYN_TRWP(TO_CYCLES64(sdr->tRP_min, period_ns)) |
		TIMINGS_ASYN_TRWH(TO_CYCLES64(sdr->tREH_min, period_ns));
	rnand->tim_seq0 =
		TIM_SEQ0_TCCS(TO_CYCLES64(sdr->tCCS_min, period_ns)) |
		TIM_SEQ0_TADL(TO_CYCLES64(sdr->tADL_min, period_ns)) |
		TIM_SEQ0_TRHW(TO_CYCLES64(sdr->tRHW_min, period_ns)) |
		TIM_SEQ0_TWHR(TO_CYCLES64(sdr->tWHR_min, period_ns));
	rnand->tim_seq1 =
		TIM_SEQ1_TWB(TO_CYCLES64(sdr->tWB_max, period_ns)) |
		TIM_SEQ1_TRR(TO_CYCLES64(sdr->tRR_min, period_ns)) |
		TIM_SEQ1_TWW(TO_CYCLES64(sdr->tWW_min, period_ns));

	/* Intermediate values (in ps) reused by several delay fields below */
	cyc = sdr->tDS_min + sdr->tDH_min;
	cle = sdr->tCLH_min + sdr->tCLS_min;
	ale = sdr->tALH_min + sdr->tALS_min;
	bef_dly = sdr->tWB_max - sdr->tDH_min;
	ca_to_data = sdr->tWHR_min + sdr->tREA_max - sdr->tDH_min;

	/*
	 * D0 = CMD -> ADDR = tCLH + tCLS - 1 cycle
	 * D1 = CMD -> CMD = tCLH + tCLS - 1 cycle
	 * D2 = CMD -> DLY = tWB - tDH
	 * D3 = CMD -> DATA = tWHR + tREA - tDH
	 */
	rnand->tim_gen_seq0 =
		TIM_GEN_SEQ0_D0(TO_CYCLES64(cle - cyc, period_ns)) |
		TIM_GEN_SEQ0_D1(TO_CYCLES64(cle - cyc, period_ns)) |
		TIM_GEN_SEQ0_D2(TO_CYCLES64(bef_dly, period_ns)) |
		TIM_GEN_SEQ0_D3(TO_CYCLES64(ca_to_data, period_ns));

	/*
	 * D4 = ADDR -> CMD = tALH + tALS - 1 cyle
	 * D5 = ADDR -> ADDR = tALH + tALS - 1 cyle
	 * D6 = ADDR -> DLY = tWB - tDH
	 * D7 = ADDR -> DATA = tWHR + tREA - tDH
	 */
	rnand->tim_gen_seq1 =
		TIM_GEN_SEQ1_D4(TO_CYCLES64(ale - cyc, period_ns)) |
		TIM_GEN_SEQ1_D5(TO_CYCLES64(ale - cyc, period_ns)) |
		TIM_GEN_SEQ1_D6(TO_CYCLES64(bef_dly, period_ns)) |
		TIM_GEN_SEQ1_D7(TO_CYCLES64(ca_to_data, period_ns));

	/*
	 * D8 = DLY -> DATA = tRR + tREA
	 * D9 = DLY -> CMD = tRR
	 * D10 = DATA -> CMD = tCLH + tCLS - 1 cycle
	 * D11 = DATA -> DLY = tWB - tDH
	 */
	rnand->tim_gen_seq2 =
		TIM_GEN_SEQ2_D8(TO_CYCLES64(sdr->tRR_min + sdr->tREA_max, period_ns)) |
		TIM_GEN_SEQ2_D9(TO_CYCLES64(sdr->tRR_min, period_ns)) |
		TIM_GEN_SEQ2_D10(TO_CYCLES64(cle - cyc, period_ns)) |
		TIM_GEN_SEQ2_D11(TO_CYCLES64(bef_dly, period_ns));

	/* D12 = DATA -> END = tCLH - tDH */
	rnand->tim_gen_seq3 =
		TIM_GEN_SEQ3_D12(TO_CYCLES64(sdr->tCLH_min - sdr->tDH_min, period_ns));

	return 0;
}
    971
    972static int rnandc_ooblayout_ecc(struct mtd_info *mtd, int section,
    973				struct mtd_oob_region *oobregion)
    974{
    975	struct nand_chip *chip = mtd_to_nand(mtd);
    976	unsigned int eccbytes = round_up(chip->ecc.bytes, 4) * chip->ecc.steps;
    977
    978	if (section)
    979		return -ERANGE;
    980
    981	oobregion->offset = 2;
    982	oobregion->length = eccbytes;
    983
    984	return 0;
    985}
    986
    987static int rnandc_ooblayout_free(struct mtd_info *mtd, int section,
    988				 struct mtd_oob_region *oobregion)
    989{
    990	struct nand_chip *chip = mtd_to_nand(mtd);
    991	unsigned int eccbytes = round_up(chip->ecc.bytes, 4) * chip->ecc.steps;
    992
    993	if (section)
    994		return -ERANGE;
    995
    996	oobregion->offset = 2 + eccbytes;
    997	oobregion->length = mtd->oobsize - oobregion->offset;
    998
    999	return 0;
   1000}
   1001
/* OOB layout: 2 BBM bytes, then the packed ECC bytes, then free space */
static const struct mtd_ooblayout_ops rnandc_ooblayout_ops = {
	.ecc = rnandc_ooblayout_ecc,
	.free = rnandc_ooblayout_free,
};
   1006
   1007static int rnandc_hw_ecc_controller_init(struct nand_chip *chip)
   1008{
   1009	struct rnand_chip *rnand = to_rnand(chip);
   1010	struct mtd_info *mtd = nand_to_mtd(chip);
   1011	struct rnandc *rnandc = to_rnandc(chip->controller);
   1012
   1013	if (mtd->writesize > SZ_16K) {
   1014		dev_err(rnandc->dev, "Unsupported page size\n");
   1015		return -EINVAL;
   1016	}
   1017
   1018	switch (chip->ecc.size) {
   1019	case SZ_256:
   1020		rnand->control |= CONTROL_ECC_BLOCK_SIZE_256;
   1021		break;
   1022	case SZ_512:
   1023		rnand->control |= CONTROL_ECC_BLOCK_SIZE_512;
   1024		break;
   1025	case SZ_1K:
   1026		rnand->control |= CONTROL_ECC_BLOCK_SIZE_1024;
   1027		break;
   1028	default:
   1029		dev_err(rnandc->dev, "Unsupported ECC chunk size\n");
   1030		return -EINVAL;
   1031	}
   1032
   1033	switch (chip->ecc.strength) {
   1034	case 2:
   1035		chip->ecc.bytes = 4;
   1036		rnand->ecc_ctrl |= ECC_CTRL_CAP_2B;
   1037		break;
   1038	case 4:
   1039		chip->ecc.bytes = 7;
   1040		rnand->ecc_ctrl |= ECC_CTRL_CAP_4B;
   1041		break;
   1042	case 8:
   1043		chip->ecc.bytes = 14;
   1044		rnand->ecc_ctrl |= ECC_CTRL_CAP_8B;
   1045		break;
   1046	case 16:
   1047		chip->ecc.bytes = 28;
   1048		rnand->ecc_ctrl |= ECC_CTRL_CAP_16B;
   1049		break;
   1050	case 24:
   1051		chip->ecc.bytes = 42;
   1052		rnand->ecc_ctrl |= ECC_CTRL_CAP_24B;
   1053		break;
   1054	case 32:
   1055		chip->ecc.bytes = 56;
   1056		rnand->ecc_ctrl |= ECC_CTRL_CAP_32B;
   1057		break;
   1058	default:
   1059		dev_err(rnandc->dev, "Unsupported ECC strength\n");
   1060		return -EINVAL;
   1061	}
   1062
   1063	rnand->ecc_ctrl |= ECC_CTRL_ERR_THRESHOLD(chip->ecc.strength);
   1064
   1065	mtd_set_ooblayout(mtd, &rnandc_ooblayout_ops);
   1066	chip->ecc.steps = mtd->writesize / chip->ecc.size;
   1067	chip->ecc.read_page = rnandc_read_page_hw_ecc;
   1068	chip->ecc.read_subpage = rnandc_read_subpage_hw_ecc;
   1069	chip->ecc.write_page = rnandc_write_page_hw_ecc;
   1070	chip->ecc.write_subpage = rnandc_write_subpage_hw_ecc;
   1071
   1072	return 0;
   1073}
   1074
   1075static int rnandc_ecc_init(struct nand_chip *chip)
   1076{
   1077	struct nand_ecc_ctrl *ecc = &chip->ecc;
   1078	const struct nand_ecc_props *requirements =
   1079		nanddev_get_ecc_requirements(&chip->base);
   1080	struct rnandc *rnandc = to_rnandc(chip->controller);
   1081	int ret;
   1082
   1083	if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
   1084	    (!ecc->size || !ecc->strength)) {
   1085		if (requirements->step_size && requirements->strength) {
   1086			ecc->size = requirements->step_size;
   1087			ecc->strength = requirements->strength;
   1088		} else {
   1089			dev_err(rnandc->dev, "No minimum ECC strength\n");
   1090			return -EINVAL;
   1091		}
   1092	}
   1093
   1094	switch (ecc->engine_type) {
   1095	case NAND_ECC_ENGINE_TYPE_ON_HOST:
   1096		ret = rnandc_hw_ecc_controller_init(chip);
   1097		if (ret)
   1098			return ret;
   1099		break;
   1100	case NAND_ECC_ENGINE_TYPE_NONE:
   1101	case NAND_ECC_ENGINE_TYPE_SOFT:
   1102	case NAND_ECC_ENGINE_TYPE_ON_DIE:
   1103		break;
   1104	default:
   1105		return -EINVAL;
   1106	}
   1107
   1108	return 0;
   1109}
   1110
   1111static int rnandc_attach_chip(struct nand_chip *chip)
   1112{
   1113	struct rnand_chip *rnand = to_rnand(chip);
   1114	struct rnandc *rnandc = to_rnandc(chip->controller);
   1115	struct mtd_info *mtd = nand_to_mtd(chip);
   1116	struct nand_memory_organization *memorg = nanddev_get_memorg(&chip->base);
   1117	int ret;
   1118
   1119	/* Do not store BBT bits in the OOB section as it is not protected */
   1120	if (chip->bbt_options & NAND_BBT_USE_FLASH)
   1121		chip->bbt_options |= NAND_BBT_NO_OOB;
   1122
   1123	if (mtd->writesize <= 512) {
   1124		dev_err(rnandc->dev, "Small page devices not supported\n");
   1125		return -EINVAL;
   1126	}
   1127
   1128	rnand->control |= CONTROL_CHECK_RB_LINE | CONTROL_INT_EN;
   1129
   1130	switch (memorg->pages_per_eraseblock) {
   1131	case 32:
   1132		rnand->control |= CONTROL_BLOCK_SIZE_32P;
   1133		break;
   1134	case 64:
   1135		rnand->control |= CONTROL_BLOCK_SIZE_64P;
   1136		break;
   1137	case 128:
   1138		rnand->control |= CONTROL_BLOCK_SIZE_128P;
   1139		break;
   1140	case 256:
   1141		rnand->control |= CONTROL_BLOCK_SIZE_256P;
   1142		break;
   1143	default:
   1144		dev_err(rnandc->dev, "Unsupported memory organization\n");
   1145		return -EINVAL;
   1146	}
   1147
   1148	chip->options |= NAND_SUBPAGE_READ;
   1149
   1150	ret = rnandc_ecc_init(chip);
   1151	if (ret) {
   1152		dev_err(rnandc->dev, "ECC initialization failed (%d)\n", ret);
   1153		return ret;
   1154	}
   1155
   1156	/* Force an update of the configuration registers */
   1157	rnand->selected_die = -1;
   1158
   1159	return 0;
   1160}
   1161
/* Controller hooks registered with the NAND core */
static const struct nand_controller_ops rnandc_ops = {
	.attach_chip = rnandc_attach_chip,
	.exec_op = rnandc_exec_op,
	.setup_interface = rnandc_setup_interface,
};
   1167
   1168static int rnandc_alloc_dma_buf(struct rnandc *rnandc,
   1169				struct mtd_info *new_mtd)
   1170{
   1171	unsigned int max_len = new_mtd->writesize + new_mtd->oobsize;
   1172	struct rnand_chip *entry, *temp;
   1173	struct nand_chip *chip;
   1174	struct mtd_info *mtd;
   1175
   1176	list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
   1177		chip = &entry->chip;
   1178		mtd = nand_to_mtd(chip);
   1179		max_len = max(max_len, mtd->writesize + mtd->oobsize);
   1180	}
   1181
   1182	if (rnandc->buf && rnandc->buf_sz < max_len) {
   1183		devm_kfree(rnandc->dev, rnandc->buf);
   1184		rnandc->buf = NULL;
   1185	}
   1186
   1187	if (!rnandc->buf) {
   1188		rnandc->buf_sz = max_len;
   1189		rnandc->buf = devm_kmalloc(rnandc->dev, max_len,
   1190					   GFP_KERNEL | GFP_DMA);
   1191		if (!rnandc->buf)
   1192			return -ENOMEM;
   1193	}
   1194
   1195	return 0;
   1196}
   1197
   1198static int rnandc_chip_init(struct rnandc *rnandc, struct device_node *np)
   1199{
   1200	struct rnand_chip *rnand;
   1201	struct mtd_info *mtd;
   1202	struct nand_chip *chip;
   1203	int nsels, ret, i;
   1204	u32 cs;
   1205
   1206	nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
   1207	if (nsels <= 0) {
   1208		ret = (nsels < 0) ? nsels : -EINVAL;
   1209		dev_err(rnandc->dev, "Invalid reg property (%d)\n", ret);
   1210		return ret;
   1211	}
   1212
   1213	/* Alloc the driver's NAND chip structure */
   1214	rnand = devm_kzalloc(rnandc->dev, struct_size(rnand, sels, nsels),
   1215			     GFP_KERNEL);
   1216	if (!rnand)
   1217		return -ENOMEM;
   1218
   1219	rnand->nsels = nsels;
   1220	rnand->selected_die = -1;
   1221
   1222	for (i = 0; i < nsels; i++) {
   1223		ret = of_property_read_u32_index(np, "reg", i, &cs);
   1224		if (ret) {
   1225			dev_err(rnandc->dev, "Incomplete reg property (%d)\n", ret);
   1226			return ret;
   1227		}
   1228
   1229		if (cs >= RNANDC_CS_NUM) {
   1230			dev_err(rnandc->dev, "Invalid reg property (%d)\n", cs);
   1231			return -EINVAL;
   1232		}
   1233
   1234		if (test_and_set_bit(cs, &rnandc->assigned_cs)) {
   1235			dev_err(rnandc->dev, "CS %d already assigned\n", cs);
   1236			return -EINVAL;
   1237		}
   1238
   1239		/*
   1240		 * No need to check for RB or WP properties, there is a 1:1
   1241		 * mandatory mapping with the CS.
   1242		 */
   1243		rnand->sels[i].cs = cs;
   1244	}
   1245
   1246	chip = &rnand->chip;
   1247	chip->controller = &rnandc->controller;
   1248	nand_set_flash_node(chip, np);
   1249
   1250	mtd = nand_to_mtd(chip);
   1251	mtd->dev.parent = rnandc->dev;
   1252	if (!mtd->name) {
   1253		dev_err(rnandc->dev, "Missing MTD label\n");
   1254		return -EINVAL;
   1255	}
   1256
   1257	ret = nand_scan(chip, rnand->nsels);
   1258	if (ret) {
   1259		dev_err(rnandc->dev, "Failed to scan the NAND chip (%d)\n", ret);
   1260		return ret;
   1261	}
   1262
   1263	ret = rnandc_alloc_dma_buf(rnandc, mtd);
   1264	if (ret)
   1265		goto cleanup_nand;
   1266
   1267	ret = mtd_device_register(mtd, NULL, 0);
   1268	if (ret) {
   1269		dev_err(rnandc->dev, "Failed to register MTD device (%d)\n", ret);
   1270		goto cleanup_nand;
   1271	}
   1272
   1273	list_add_tail(&rnand->node, &rnandc->chips);
   1274
   1275	return 0;
   1276
   1277cleanup_nand:
   1278	nand_cleanup(chip);
   1279
   1280	return ret;
   1281}
   1282
   1283static void rnandc_chips_cleanup(struct rnandc *rnandc)
   1284{
   1285	struct rnand_chip *entry, *temp;
   1286	struct nand_chip *chip;
   1287	int ret;
   1288
   1289	list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
   1290		chip = &entry->chip;
   1291		ret = mtd_device_unregister(nand_to_mtd(chip));
   1292		WARN_ON(ret);
   1293		nand_cleanup(chip);
   1294		list_del(&entry->node);
   1295	}
   1296}
   1297
   1298static int rnandc_chips_init(struct rnandc *rnandc)
   1299{
   1300	struct device_node *np;
   1301	int ret;
   1302
   1303	for_each_child_of_node(rnandc->dev->of_node, np) {
   1304		ret = rnandc_chip_init(rnandc, np);
   1305		if (ret) {
   1306			of_node_put(np);
   1307			goto cleanup_chips;
   1308		}
   1309	}
   1310
   1311	return 0;
   1312
   1313cleanup_chips:
   1314	rnandc_chips_cleanup(rnandc);
   1315
   1316	return ret;
   1317}
   1318
/*
 * Probe the controller: map the registers, power it up through runtime PM,
 * fetch the external bus clock rate (needed for timing computations), hook
 * the interrupt (or fall back to polling) and register the attached chips.
 */
static int rnandc_probe(struct platform_device *pdev)
{
	struct rnandc *rnandc;
	struct clk *eclk;
	int irq, ret;

	rnandc = devm_kzalloc(&pdev->dev, sizeof(*rnandc), GFP_KERNEL);
	if (!rnandc)
		return -ENOMEM;

	rnandc->dev = &pdev->dev;
	nand_controller_init(&rnandc->controller);
	rnandc->controller.ops = &rnandc_ops;
	INIT_LIST_HEAD(&rnandc->chips);
	init_completion(&rnandc->complete);

	rnandc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(rnandc->regs))
		return PTR_ERR(rnandc->regs);

	/* Power the controller; the put is done in remove (or error path) */
	devm_pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0)
		return ret;

	/* The external NAND bus clock rate is needed for computing timings */
	eclk = clk_get(&pdev->dev, "eclk");
	if (IS_ERR(eclk)) {
		ret = PTR_ERR(eclk);
		goto dis_runtime_pm;
	}

	/* Only the rate is needed, the clock reference can be dropped here */
	rnandc->ext_clk_rate = clk_get_rate(eclk);
	clk_put(eclk);

	/* Mask interrupts before (possibly) installing the handler */
	rnandc_dis_interrupts(rnandc);
	irq = platform_get_irq_optional(pdev, 0);
	if (irq == -EPROBE_DEFER) {
		ret = irq;
		goto dis_runtime_pm;
	} else if (irq < 0) {
		/* The IRQ line is optional: poll the status instead */
		dev_info(&pdev->dev, "No IRQ found, fallback to polling\n");
		rnandc->use_polling = true;
	} else {
		ret = devm_request_irq(&pdev->dev, irq, rnandc_irq_handler, 0,
				       "renesas-nand-controller", rnandc);
		if (ret < 0)
			goto dis_runtime_pm;
	}

	/* The controller only supports 32-bit DMA addressing */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto dis_runtime_pm;

	rnandc_clear_fifo(rnandc);

	platform_set_drvdata(pdev, rnandc);

	ret = rnandc_chips_init(rnandc);
	if (ret)
		goto dis_runtime_pm;

	return 0;

dis_runtime_pm:
	pm_runtime_put(&pdev->dev);

	return ret;
}
   1388
/* Unregister all chips and drop the PM reference taken at probe time */
static int rnandc_remove(struct platform_device *pdev)
{
	struct rnandc *rnandc = platform_get_drvdata(pdev);

	rnandc_chips_cleanup(rnandc);

	/* Balances the pm_runtime_resume_and_get() from probe */
	pm_runtime_put(&pdev->dev);

	return 0;
}
   1399
/* Device tree match table: same IP on R-Car Gen3 and RZ/N1 SoCs */
static const struct of_device_id rnandc_id_table[] = {
	{ .compatible = "renesas,rcar-gen3-nandc" },
	{ .compatible = "renesas,rzn1-nandc" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, rnandc_id_table);
   1406
/* Platform driver glue and module boilerplate */
static struct platform_driver rnandc_driver = {
	.driver = {
		.name = "renesas-nandc",
		.of_match_table = rnandc_id_table,
	},
	.probe = rnandc_probe,
	.remove = rnandc_remove,
};
module_platform_driver(rnandc_driver);

MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Renesas R-Car Gen3 & RZ/N1 NAND controller driver");
MODULE_LICENSE("GPL v2");