cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

arasan-nand-controller.c (40751B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Arasan NAND Flash Controller Driver
      4 *
      5 * Copyright (C) 2014 - 2020 Xilinx, Inc.
      6 * Author:
      7 *   Miquel Raynal <miquel.raynal@bootlin.com>
      8 * Original work (fully rewritten):
      9 *   Punnaiah Choudary Kalluri <punnaia@xilinx.com>
     10 *   Naga Sureshkumar Relli <nagasure@xilinx.com>
     11 */
     12
     13#include <linux/bch.h>
     14#include <linux/bitfield.h>
     15#include <linux/clk.h>
     16#include <linux/delay.h>
     17#include <linux/dma-mapping.h>
     18#include <linux/gpio/consumer.h>
     19#include <linux/interrupt.h>
     20#include <linux/iopoll.h>
     21#include <linux/module.h>
     22#include <linux/mtd/mtd.h>
     23#include <linux/mtd/partitions.h>
     24#include <linux/mtd/rawnand.h>
     25#include <linux/of.h>
     26#include <linux/platform_device.h>
     27#include <linux/slab.h>
     28
     29#define PKT_REG				0x00
     30#define   PKT_SIZE(x)			FIELD_PREP(GENMASK(10, 0), (x))
     31#define   PKT_STEPS(x)			FIELD_PREP(GENMASK(23, 12), (x))
     32
     33#define MEM_ADDR1_REG			0x04
     34
     35#define MEM_ADDR2_REG			0x08
     36#define   ADDR2_STRENGTH(x)		FIELD_PREP(GENMASK(27, 25), (x))
     37#define   ADDR2_CS(x)			FIELD_PREP(GENMASK(31, 30), (x))
     38
     39#define CMD_REG				0x0C
     40#define   CMD_1(x)			FIELD_PREP(GENMASK(7, 0), (x))
     41#define   CMD_2(x)			FIELD_PREP(GENMASK(15, 8), (x))
     42#define   CMD_PAGE_SIZE(x)		FIELD_PREP(GENMASK(25, 23), (x))
     43#define   CMD_DMA_ENABLE		BIT(27)
     44#define   CMD_NADDRS(x)			FIELD_PREP(GENMASK(30, 28), (x))
     45#define   CMD_ECC_ENABLE		BIT(31)
     46
     47#define PROG_REG			0x10
     48#define   PROG_PGRD			BIT(0)
     49#define   PROG_ERASE			BIT(2)
     50#define   PROG_STATUS			BIT(3)
     51#define   PROG_PGPROG			BIT(4)
     52#define   PROG_RDID			BIT(6)
     53#define   PROG_RDPARAM			BIT(7)
     54#define   PROG_RST			BIT(8)
     55#define   PROG_GET_FEATURE		BIT(9)
     56#define   PROG_SET_FEATURE		BIT(10)
     57#define   PROG_CHG_RD_COL_ENH		BIT(14)
     58
     59#define INTR_STS_EN_REG			0x14
     60#define INTR_SIG_EN_REG			0x18
     61#define INTR_STS_REG			0x1C
     62#define   WRITE_READY			BIT(0)
     63#define   READ_READY			BIT(1)
     64#define   XFER_COMPLETE			BIT(2)
     65#define   DMA_BOUNDARY			BIT(6)
     66#define   EVENT_MASK			GENMASK(7, 0)
     67
     68#define READY_STS_REG			0x20
     69
     70#define DMA_ADDR0_REG			0x50
     71#define DMA_ADDR1_REG			0x24
     72
     73#define FLASH_STS_REG			0x28
     74
     75#define TIMING_REG			0x2C
     76#define   TCCS_TIME_500NS		0
     77#define   TCCS_TIME_300NS		3
     78#define   TCCS_TIME_200NS		2
     79#define   TCCS_TIME_100NS		1
     80#define   FAST_TCAD			BIT(2)
     81#define   DQS_BUFF_SEL_IN(x)		FIELD_PREP(GENMASK(6, 3), (x))
     82#define   DQS_BUFF_SEL_OUT(x)		FIELD_PREP(GENMASK(18, 15), (x))
     83
     84#define DATA_PORT_REG			0x30
     85
     86#define ECC_CONF_REG			0x34
     87#define   ECC_CONF_COL(x)		FIELD_PREP(GENMASK(15, 0), (x))
     88#define   ECC_CONF_LEN(x)		FIELD_PREP(GENMASK(26, 16), (x))
     89#define   ECC_CONF_BCH_EN		BIT(27)
     90
     91#define ECC_ERR_CNT_REG			0x38
     92#define   GET_PKT_ERR_CNT(x)		FIELD_GET(GENMASK(7, 0), (x))
     93#define   GET_PAGE_ERR_CNT(x)		FIELD_GET(GENMASK(16, 8), (x))
     94
     95#define ECC_SP_REG			0x3C
     96#define   ECC_SP_CMD1(x)		FIELD_PREP(GENMASK(7, 0), (x))
     97#define   ECC_SP_CMD2(x)		FIELD_PREP(GENMASK(15, 8), (x))
     98#define   ECC_SP_ADDRS(x)		FIELD_PREP(GENMASK(30, 28), (x))
     99
    100#define ECC_1ERR_CNT_REG		0x40
    101#define ECC_2ERR_CNT_REG		0x44
    102
    103#define DATA_INTERFACE_REG		0x6C
    104#define   DIFACE_SDR_MODE(x)		FIELD_PREP(GENMASK(2, 0), (x))
    105#define   DIFACE_DDR_MODE(x)		FIELD_PREP(GENMASK(5, 3), (x))
    106#define   DIFACE_SDR			0
    107#define   DIFACE_NVDDR			BIT(9)
    108
    109#define ANFC_MAX_CS			2
    110#define ANFC_DFLT_TIMEOUT_US		1000000
    111#define ANFC_MAX_CHUNK_SIZE		SZ_1M
    112#define ANFC_MAX_PARAM_SIZE		SZ_4K
    113#define ANFC_MAX_STEPS			SZ_2K
    114#define ANFC_MAX_PKT_SIZE		(SZ_2K - 1)
    115#define ANFC_MAX_ADDR_CYC		5U
    116#define ANFC_RSVD_ECC_BYTES		21
    117
    118#define ANFC_XLNX_SDR_DFLT_CORE_CLK	100000000
    119#define ANFC_XLNX_SDR_HS_CORE_CLK	80000000
    120
    121static struct gpio_desc *anfc_default_cs_array[2] = {NULL, NULL};
    122
    123/**
    124 * struct anfc_op - Defines how to execute an operation
    125 * @pkt_reg: Packet register
    126 * @addr1_reg: Memory address 1 register
    127 * @addr2_reg: Memory address 2 register
    128 * @cmd_reg: Command register
    129 * @prog_reg: Program register
    130 * @steps: Number of "packets" to read/write
    131 * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
    132 * @len: Data transfer length
    133 * @read: Data transfer direction from the controller's point of view
    134 * @buf: Data buffer
    135 */
    136struct anfc_op {
    137	u32 pkt_reg;
    138	u32 addr1_reg;
    139	u32 addr2_reg;
    140	u32 cmd_reg;
    141	u32 prog_reg;
    142	int steps;
    143	unsigned int rdy_timeout_ms;
    144	unsigned int len;
    145	bool read;
    146	u8 *buf;
    147};
    148
    149/**
    150 * struct anand - Defines the NAND chip related information
    151 * @node:		Used to store NAND chips into a list
    152 * @chip:		NAND chip information structure
    153 * @rb:			Ready-busy line
    154 * @page_sz:		Register value of the page_sz field to use
    155 * @clk:		Expected clock frequency to use
    156 * @data_iface:		Data interface timing mode to use
    157 * @timings:		NV-DDR specific timings to use
    158 * @ecc_conf:		Hardware ECC configuration value
    159 * @strength:		Register value of the ECC strength
    160 * @raddr_cycles:	Row address cycle information
    161 * @caddr_cycles:	Column address cycle information
    162 * @ecc_bits:		Exact number of ECC bits per syndrome
    163 * @ecc_total:		Total number of ECC bytes
    164 * @errloc:		Array of errors located with soft BCH
    165 * @hw_ecc:		Buffer to store syndromes computed by hardware
    166 * @bch:		BCH structure
    167 * @cs_idx:		Array of chip-select for this device, values are indexes
    168 *			of the controller structure @gpio_cs array
    169 * @ncs_idx:		Size of the @cs_idx array
    170 */
    171struct anand {
    172	struct list_head node;
    173	struct nand_chip chip;
    174	unsigned int rb;
    175	unsigned int page_sz;
    176	unsigned long clk;
    177	u32 data_iface;
    178	u32 timings;
    179	u32 ecc_conf;
    180	u32 strength;
    181	u16 raddr_cycles;
    182	u16 caddr_cycles;
    183	unsigned int ecc_bits;
    184	unsigned int ecc_total;
    185	unsigned int *errloc;
    186	u8 *hw_ecc;
    187	struct bch_control *bch;
    188	int *cs_idx;
    189	int ncs_idx;
    190};
    191
    192/**
    193 * struct arasan_nfc - Defines the Arasan NAND flash controller driver instance
    194 * @dev:		Pointer to the device structure
    195 * @base:		Remapped register area
    196 * @controller_clk:		Pointer to the system clock
    197 * @bus_clk:		Pointer to the flash clock
    198 * @controller:		Base controller structure
    199 * @chips:		List of all NAND chips attached to the controller
    200 * @cur_clk:		Current clock rate
    201 * @cs_array:		CS array. Native CS are left empty, the other cells are
    202 *			populated with their corresponding GPIO descriptor.
    203 * @ncs:		Size of @cs_array
    204 * @cur_cs:		Index in @cs_array of the currently in use CS
    205 * @native_cs:		Currently selected native CS
    206 * @spare_cs:		Native CS that is not wired (may be selected when a GPIO
    207 *			CS is in use)
    208 */
    209struct arasan_nfc {
    210	struct device *dev;
    211	void __iomem *base;
    212	struct clk *controller_clk;
    213	struct clk *bus_clk;
    214	struct nand_controller controller;
    215	struct list_head chips;
    216	unsigned int cur_clk;
    217	struct gpio_desc **cs_array;
    218	unsigned int ncs;
    219	int cur_cs;
    220	unsigned int native_cs;
    221	unsigned int spare_cs;
    222};
    223
    224static struct anand *to_anand(struct nand_chip *nand)
    225{
    226	return container_of(nand, struct anand, chip);
    227}
    228
    229static struct arasan_nfc *to_anfc(struct nand_controller *ctrl)
    230{
    231	return container_of(ctrl, struct arasan_nfc, controller);
    232}
    233
    234static int anfc_wait_for_event(struct arasan_nfc *nfc, unsigned int event)
    235{
    236	u32 val;
    237	int ret;
    238
    239	ret = readl_relaxed_poll_timeout(nfc->base + INTR_STS_REG, val,
    240					 val & event, 0,
    241					 ANFC_DFLT_TIMEOUT_US);
    242	if (ret) {
    243		dev_err(nfc->dev, "Timeout waiting for event 0x%x\n", event);
    244		return -ETIMEDOUT;
    245	}
    246
    247	writel_relaxed(event, nfc->base + INTR_STS_REG);
    248
    249	return 0;
    250}
    251
    252static int anfc_wait_for_rb(struct arasan_nfc *nfc, struct nand_chip *chip,
    253			    unsigned int timeout_ms)
    254{
    255	struct anand *anand = to_anand(chip);
    256	u32 val;
    257	int ret;
    258
    259	/* There is no R/B interrupt, we must poll a register */
    260	ret = readl_relaxed_poll_timeout(nfc->base + READY_STS_REG, val,
    261					 val & BIT(anand->rb),
    262					 1, timeout_ms * 1000);
    263	if (ret) {
    264		dev_err(nfc->dev, "Timeout waiting for R/B 0x%x\n",
    265			readl_relaxed(nfc->base + READY_STS_REG));
    266		return -ETIMEDOUT;
    267	}
    268
    269	return 0;
    270}
    271
    272static void anfc_trigger_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
    273{
    274	writel_relaxed(nfc_op->pkt_reg, nfc->base + PKT_REG);
    275	writel_relaxed(nfc_op->addr1_reg, nfc->base + MEM_ADDR1_REG);
    276	writel_relaxed(nfc_op->addr2_reg, nfc->base + MEM_ADDR2_REG);
    277	writel_relaxed(nfc_op->cmd_reg, nfc->base + CMD_REG);
    278	writel_relaxed(nfc_op->prog_reg, nfc->base + PROG_REG);
    279}
    280
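       /*
        * Split a transfer length into a power-of-two number of identical
        * packets, each fitting in the 11-bit packet size field
        * (ANFC_MAX_PKT_SIZE). For example, a 4096-byte transfer cannot be
        * done in 1 or 2 packets (4096 and 2048 both exceed 2047 bytes) and
        * is therefore split into 4 packets of 1024 bytes. Lengths that
        * cannot be split evenly are rejected with -ENOTSUPP.
        */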
    281static int anfc_pkt_len_config(unsigned int len, unsigned int *steps,
    282			       unsigned int *pktsize)
    283{
    284	unsigned int nb, sz;
    285
    286	for (nb = 1; nb < ANFC_MAX_STEPS; nb *= 2) {
    287		sz = len / nb;
    288		if (sz <= ANFC_MAX_PKT_SIZE)
    289			break;
    290	}
    291
    292	if (sz * nb != len)
    293		return -ENOTSUPP;
    294
    295	if (steps)
    296		*steps = nb;
    297
    298	if (pktsize)
    299		*pktsize = sz;
    300
    301	return 0;
    302}
    303
    304static bool anfc_is_gpio_cs(struct arasan_nfc *nfc, int nfc_cs)
    305{
    306	return nfc_cs >= 0 && nfc->cs_array[nfc_cs];
    307}
    308
    309static int anfc_relative_to_absolute_cs(struct anand *anand, int num)
    310{
    311	return anand->cs_idx[num];
    312}
    313
    314static void anfc_assert_cs(struct arasan_nfc *nfc, unsigned int nfc_cs_idx)
    315{
    316	/* CS did not change: do nothing */
    317	if (nfc->cur_cs == nfc_cs_idx)
    318		return;
    319
    320	/* Deassert the previous CS if it was a GPIO */
    321	if (anfc_is_gpio_cs(nfc, nfc->cur_cs))
    322		gpiod_set_value_cansleep(nfc->cs_array[nfc->cur_cs], 1);
    323
    324	/* Assert the new one */
    325	if (anfc_is_gpio_cs(nfc, nfc_cs_idx)) {
    326		nfc->native_cs = nfc->spare_cs;
    327		gpiod_set_value_cansleep(nfc->cs_array[nfc_cs_idx], 0);
    328	} else {
    329		nfc->native_cs = nfc_cs_idx;
    330	}
    331
    332	nfc->cur_cs = nfc_cs_idx;
    333}
    334
    335static int anfc_select_target(struct nand_chip *chip, int target)
    336{
    337	struct anand *anand = to_anand(chip);
    338	struct arasan_nfc *nfc = to_anfc(chip->controller);
    339	unsigned int nfc_cs_idx = anfc_relative_to_absolute_cs(anand, target);
    340	int ret;
    341
    342	anfc_assert_cs(nfc, nfc_cs_idx);
    343
    344	/* Update the controller timings and the potential ECC configuration */
    345	writel_relaxed(anand->data_iface, nfc->base + DATA_INTERFACE_REG);
    346	writel_relaxed(anand->timings, nfc->base + TIMING_REG);
    347
    348	/* Update clock frequency */
    349	if (nfc->cur_clk != anand->clk) {
    350		clk_disable_unprepare(nfc->controller_clk);
    351		ret = clk_set_rate(nfc->controller_clk, anand->clk);
    352		if (ret) {
    353			dev_err(nfc->dev, "Failed to change clock rate\n");
    354			return ret;
    355		}
    356
    357		ret = clk_prepare_enable(nfc->controller_clk);
    358		if (ret) {
    359			dev_err(nfc->dev,
    360				"Failed to re-enable the controller clock\n");
    361			return ret;
    362		}
    363
    364		nfc->cur_clk = anand->clk;
    365	}
    366
    367	return 0;
    368}
    369
    370/*
    371 * When using the embedded hardware ECC engine, the controller is in charge of
    372 * feeding the engine with, first, the ECC residue present in the data array.
    373 * A typical read operation is:
    374 * 1/ Assert the read operation by sending the relevant command/address cycles
    375 *    but targeting the column of the first ECC bytes in the OOB area instead of
    376 *    the main data directly.
    377 * 2/ After having read the relevant number of ECC bytes, the controller uses
    378 *    the RNDOUT/RNDSTART commands which are set into the "ECC Spare Command
    379 *    Register" to move the pointer back to the beginning of the main data.
    380 * 3/ It will read the content of the main area for a given size (pktsize) and
    381 *    will feed the ECC engine with this buffer again.
    382 * 4/ The ECC engine derives the ECC bytes for the given data and compares them
    383 *    with the ones already received. It eventually triggers status flags and
    384 *    then sets the "Buffer Read Ready" flag.
    385 * 5/ The corrected data is then available for reading from the data port
    386 *    register.
    387 *
    388 * The hardware BCH ECC engine is known to be inconsistent in BCH mode and never
    389 * reports uncorrectable errors. Because of this bug, we have to use the
    390 * software BCH implementation in the read path.
    391 */
    392static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
    393				 int oob_required, int page)
    394{
    395	struct arasan_nfc *nfc = to_anfc(chip->controller);
    396	struct mtd_info *mtd = nand_to_mtd(chip);
    397	struct anand *anand = to_anand(chip);
    398	unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
    399	unsigned int max_bitflips = 0;
    400	dma_addr_t dma_addr;
    401	int step, ret;
    402	struct anfc_op nfc_op = {
    403		.pkt_reg =
    404			PKT_SIZE(chip->ecc.size) |
    405			PKT_STEPS(chip->ecc.steps),
    406		.addr1_reg =
    407			(page & 0xFF) << (8 * (anand->caddr_cycles)) |
    408			(((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
    409		.addr2_reg =
    410			((page >> 16) & 0xFF) |
    411			ADDR2_STRENGTH(anand->strength) |
    412			ADDR2_CS(nfc->native_cs),
    413		.cmd_reg =
    414			CMD_1(NAND_CMD_READ0) |
    415			CMD_2(NAND_CMD_READSTART) |
    416			CMD_PAGE_SIZE(anand->page_sz) |
    417			CMD_DMA_ENABLE |
    418			CMD_NADDRS(anand->caddr_cycles +
    419				   anand->raddr_cycles),
    420		.prog_reg = PROG_PGRD,
    421	};
    422
    423	dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_FROM_DEVICE);
    424	if (dma_mapping_error(nfc->dev, dma_addr)) {
    425		dev_err(nfc->dev, "Buffer mapping error");
    426		return -EIO;
    427	}
    428
    429	writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
    430	writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);
    431
    432	anfc_trigger_op(nfc, &nfc_op);
    433
    434	ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
    435	dma_unmap_single(nfc->dev, dma_addr, len, DMA_FROM_DEVICE);
    436	if (ret) {
    437		dev_err(nfc->dev, "Error reading page %d\n", page);
    438		return ret;
    439	}
    440
    441	/* Store the raw OOB bytes as well */
    442	ret = nand_change_read_column_op(chip, mtd->writesize, chip->oob_poi,
    443					 mtd->oobsize, 0);
    444	if (ret)
    445		return ret;
    446
    447	/*
    448	 * For each step, compute the BCH syndrome over the raw data in software
    449	 * and compare the number of errors found with the hardware engine
    450	 * feedback.
    451	 */
    452	for (step = 0; step < chip->ecc.steps; step++) {
    453		u8 *raw_buf = &buf[step * chip->ecc.size];
    454		unsigned int bit, byte;
    455		int bf, i;
    456
    457		/* Extract the syndrome, it is not necessarily aligned */
    458		memset(anand->hw_ecc, 0, chip->ecc.bytes);
    459		nand_extract_bits(anand->hw_ecc, 0,
    460				  &chip->oob_poi[mtd->oobsize - anand->ecc_total],
    461				  anand->ecc_bits * step, anand->ecc_bits);
    462
    463		bf = bch_decode(anand->bch, raw_buf, chip->ecc.size,
    464				anand->hw_ecc, NULL, NULL, anand->errloc);
    465		if (!bf) {
    466			continue;
    467		} else if (bf > 0) {
    468			for (i = 0; i < bf; i++) {
    469				/* Only correct the data, not the syndrome */
    470				if (anand->errloc[i] < (chip->ecc.size * 8)) {
    471					bit = BIT(anand->errloc[i] & 7);
    472					byte = anand->errloc[i] >> 3;
    473					raw_buf[byte] ^= bit;
    474				}
    475			}
    476
    477			mtd->ecc_stats.corrected += bf;
    478			max_bitflips = max_t(unsigned int, max_bitflips, bf);
    479
    480			continue;
    481		}
    482
    483		bf = nand_check_erased_ecc_chunk(raw_buf, chip->ecc.size,
    484						 NULL, 0, NULL, 0,
    485						 chip->ecc.strength);
    486		if (bf > 0) {
    487			mtd->ecc_stats.corrected += bf;
    488			max_bitflips = max_t(unsigned int, max_bitflips, bf);
    489			memset(raw_buf, 0xFF, chip->ecc.size);
    490		} else if (bf < 0) {
    491			mtd->ecc_stats.failed++;
    492		}
    493	}
    494
    495	return 0;
    496}
    497
    498static int anfc_sel_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
    499				     int oob_required, int page)
    500{
    501	int ret;
    502
    503	ret = anfc_select_target(chip, chip->cur_cs);
    504	if (ret)
    505		return ret;
    506
    507	return anfc_read_page_hw_ecc(chip, buf, oob_required, page);
    508};
    509
    510static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
    511				  int oob_required, int page)
    512{
    513	struct anand *anand = to_anand(chip);
    514	struct arasan_nfc *nfc = to_anfc(chip->controller);
    515	struct mtd_info *mtd = nand_to_mtd(chip);
    516	unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
    517	dma_addr_t dma_addr;
    518	int ret;
    519	struct anfc_op nfc_op = {
    520		.pkt_reg =
    521			PKT_SIZE(chip->ecc.size) |
    522			PKT_STEPS(chip->ecc.steps),
    523		.addr1_reg =
    524			(page & 0xFF) << (8 * (anand->caddr_cycles)) |
    525			(((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
    526		.addr2_reg =
    527			((page >> 16) & 0xFF) |
    528			ADDR2_STRENGTH(anand->strength) |
    529			ADDR2_CS(nfc->native_cs),
    530		.cmd_reg =
    531			CMD_1(NAND_CMD_SEQIN) |
    532			CMD_2(NAND_CMD_PAGEPROG) |
    533			CMD_PAGE_SIZE(anand->page_sz) |
    534			CMD_DMA_ENABLE |
    535			CMD_NADDRS(anand->caddr_cycles +
    536				   anand->raddr_cycles) |
    537			CMD_ECC_ENABLE,
    538		.prog_reg = PROG_PGPROG,
    539	};
    540
    541	writel_relaxed(anand->ecc_conf, nfc->base + ECC_CONF_REG);
    542	writel_relaxed(ECC_SP_CMD1(NAND_CMD_RNDIN) |
    543		       ECC_SP_ADDRS(anand->caddr_cycles),
    544		       nfc->base + ECC_SP_REG);
    545
    546	dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_TO_DEVICE);
    547	if (dma_mapping_error(nfc->dev, dma_addr)) {
    548		dev_err(nfc->dev, "Buffer mapping error");
    549		return -EIO;
    550	}
    551
    552	writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
    553	writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);
    554
    555	anfc_trigger_op(nfc, &nfc_op);
    556	ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
    557	dma_unmap_single(nfc->dev, dma_addr, len, DMA_TO_DEVICE);
    558	if (ret) {
    559		dev_err(nfc->dev, "Error writing page %d\n", page);
    560		return ret;
    561	}
    562
    563	/* Spare data is not protected */
    564	if (oob_required)
    565		ret = nand_write_oob_std(chip, page);
    566
    567	return ret;
    568}
    569
    570static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
    571				      int oob_required, int page)
    572{
    573	int ret;
    574
    575	ret = anfc_select_target(chip, chip->cur_cs);
    576	if (ret)
    577		return ret;
    578
    579	return anfc_write_page_hw_ecc(chip, buf, oob_required, page);
    580};
    581
    582/* NAND framework ->exec_op() hooks and related helpers */
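       /*
        * Translate a nand_subop into ANFC register values: the first CMD
        * instruction goes into CMD_1 and a second one into CMD_2, up to five
        * address cycles are spread over MEM_ADDR1/MEM_ADDR2, and DATA
        * instructions set the packet geometry (PKT_REG) as well as the
        * buffer, length and direction used later by the PIO helper.
        */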
    583static int anfc_parse_instructions(struct nand_chip *chip,
    584				   const struct nand_subop *subop,
    585				   struct anfc_op *nfc_op)
    586{
    587	struct arasan_nfc *nfc = to_anfc(chip->controller);
    588	struct anand *anand = to_anand(chip);
    589	const struct nand_op_instr *instr = NULL;
    590	bool first_cmd = true;
    591	unsigned int op_id;
    592	int ret, i;
    593
    594	memset(nfc_op, 0, sizeof(*nfc_op));
    595	nfc_op->addr2_reg = ADDR2_CS(nfc->native_cs);
    596	nfc_op->cmd_reg = CMD_PAGE_SIZE(anand->page_sz);
    597
    598	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
    599		unsigned int offset, naddrs, pktsize;
    600		const u8 *addrs;
    601		u8 *buf;
    602
    603		instr = &subop->instrs[op_id];
    604
    605		switch (instr->type) {
    606		case NAND_OP_CMD_INSTR:
    607			if (first_cmd)
    608				nfc_op->cmd_reg |= CMD_1(instr->ctx.cmd.opcode);
    609			else
    610				nfc_op->cmd_reg |= CMD_2(instr->ctx.cmd.opcode);
    611
    612			first_cmd = false;
    613			break;
    614
    615		case NAND_OP_ADDR_INSTR:
    616			offset = nand_subop_get_addr_start_off(subop, op_id);
    617			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
    618			addrs = &instr->ctx.addr.addrs[offset];
    619			nfc_op->cmd_reg |= CMD_NADDRS(naddrs);
    620
    621			for (i = 0; i < min(ANFC_MAX_ADDR_CYC, naddrs); i++) {
    622				if (i < 4)
    623					nfc_op->addr1_reg |= (u32)addrs[i] << i * 8;
    624				else
    625					nfc_op->addr2_reg |= addrs[i];
    626			}
    627
    628			break;
    629		case NAND_OP_DATA_IN_INSTR:
    630			nfc_op->read = true;
    631			fallthrough;
    632		case NAND_OP_DATA_OUT_INSTR:
    633			offset = nand_subop_get_data_start_off(subop, op_id);
    634			buf = instr->ctx.data.buf.in;
    635			nfc_op->buf = &buf[offset];
    636			nfc_op->len = nand_subop_get_data_len(subop, op_id);
    637			ret = anfc_pkt_len_config(nfc_op->len, &nfc_op->steps,
    638						  &pktsize);
    639			if (ret)
    640				return ret;
    641
    642			/*
    643			 * Number of DATA cycles must be aligned on 4, this
    644			 * means the controller might read/write more than
    645			 * requested. This is harmless most of the time as extra
    646			 * DATA are discarded in the write path and the read
    647			 * pointer is adjusted in the read path.
    648			 *
    649			 * FIXME: The core should mark operations where
    650			 * reading/writing more is allowed so the exec_op()
    651			 * implementation can take the right decision when the
    652			 * alignment constraint is not met: adjust the number of
    653			 * DATA cycles when it's allowed, reject the operation
    654			 * otherwise.
    655			 */
    656			nfc_op->pkt_reg |= PKT_SIZE(round_up(pktsize, 4)) |
    657					   PKT_STEPS(nfc_op->steps);
    658			break;
    659		case NAND_OP_WAITRDY_INSTR:
    660			nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
    661			break;
    662		}
    663	}
    664
    665	return 0;
    666}
    667
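       /*
        * PIO data path: for each packet, wait for the READ_READY/WRITE_READY
        * event, then move whole 32-bit words through the data port register.
        * A trailing, non-word-aligned remainder is handled with one extra
        * access, and the transfer ends once XFER_COMPLETE is raised.
        */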
    668static int anfc_rw_pio_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
    669{
    670	unsigned int dwords = (nfc_op->len / 4) / nfc_op->steps;
    671	unsigned int last_len = nfc_op->len % 4;
    672	unsigned int offset, dir;
    673	u8 *buf = nfc_op->buf;
    674	int ret, i;
    675
    676	for (i = 0; i < nfc_op->steps; i++) {
    677		dir = nfc_op->read ? READ_READY : WRITE_READY;
    678		ret = anfc_wait_for_event(nfc, dir);
    679		if (ret) {
    680			dev_err(nfc->dev, "PIO %s ready signal not received\n",
    681				nfc_op->read ? "Read" : "Write");
    682			return ret;
    683		}
    684
    685		offset = i * (dwords * 4);
    686		if (nfc_op->read)
    687			ioread32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
    688				     dwords);
    689		else
    690			iowrite32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
    691				      dwords);
    692	}
    693
    694	if (last_len) {
    695		u32 remainder;
    696
    697		offset = nfc_op->len - last_len;
    698
    699		if (nfc_op->read) {
    700			remainder = readl_relaxed(nfc->base + DATA_PORT_REG);
    701			memcpy(&buf[offset], &remainder, last_len);
    702		} else {
    703			memcpy(&remainder, &buf[offset], last_len);
    704			writel_relaxed(remainder, nfc->base + DATA_PORT_REG);
    705		}
    706	}
    707
    708	return anfc_wait_for_event(nfc, XFER_COMPLETE);
    709}
    710
    711static int anfc_misc_data_type_exec(struct nand_chip *chip,
    712				    const struct nand_subop *subop,
    713				    u32 prog_reg)
    714{
    715	struct arasan_nfc *nfc = to_anfc(chip->controller);
    716	struct anfc_op nfc_op = {};
    717	int ret;
    718
    719	ret = anfc_parse_instructions(chip, subop, &nfc_op);
    720	if (ret)
    721		return ret;
    722
    723	nfc_op.prog_reg = prog_reg;
    724	anfc_trigger_op(nfc, &nfc_op);
    725
    726	if (nfc_op.rdy_timeout_ms) {
    727		ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
    728		if (ret)
    729			return ret;
    730	}
    731
    732	return anfc_rw_pio_op(nfc, &nfc_op);
    733}
    734
    735static int anfc_param_read_type_exec(struct nand_chip *chip,
    736				     const struct nand_subop *subop)
    737{
    738	return anfc_misc_data_type_exec(chip, subop, PROG_RDPARAM);
    739}
    740
    741static int anfc_data_read_type_exec(struct nand_chip *chip,
    742				    const struct nand_subop *subop)
    743{
    744	u32 prog_reg = PROG_PGRD;
    745
    746	/*
    747	 * Experience shows that while in SDR mode sending a CHANGE READ COLUMN
    748	 * command through the READ PAGE "type" always works fine, when in
    749	 * NV-DDR mode the same command simply fails. However, it was also
    750	 * spotted that any CHANGE READ COLUMN command sent through the CHANGE
    751	 * READ COLUMN ENHANCED "type" would correctly work in both cases (SDR
    752	 * and NV-DDR). So, for simplicity, let's program the controller with
    753	 * the CHANGE READ COLUMN ENHANCED "type" whenever we are requested to
    754	 * perform a CHANGE READ COLUMN operation.
    755	 */
    756	if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_RNDOUT &&
    757	    subop->instrs[2].ctx.cmd.opcode == NAND_CMD_RNDOUTSTART)
    758		prog_reg = PROG_CHG_RD_COL_ENH;
    759
    760	return anfc_misc_data_type_exec(chip, subop, prog_reg);
    761}
    762
    763static int anfc_param_write_type_exec(struct nand_chip *chip,
    764				      const struct nand_subop *subop)
    765{
    766	return anfc_misc_data_type_exec(chip, subop, PROG_SET_FEATURE);
    767}
    768
    769static int anfc_data_write_type_exec(struct nand_chip *chip,
    770				     const struct nand_subop *subop)
    771{
    772	return anfc_misc_data_type_exec(chip, subop, PROG_PGPROG);
    773}
    774
    775static int anfc_misc_zerolen_type_exec(struct nand_chip *chip,
    776				       const struct nand_subop *subop,
    777				       u32 prog_reg)
    778{
    779	struct arasan_nfc *nfc = to_anfc(chip->controller);
    780	struct anfc_op nfc_op = {};
    781	int ret;
    782
    783	ret = anfc_parse_instructions(chip, subop, &nfc_op);
    784	if (ret)
    785		return ret;
    786
    787	nfc_op.prog_reg = prog_reg;
    788	anfc_trigger_op(nfc, &nfc_op);
    789
    790	ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
    791	if (ret)
    792		return ret;
    793
    794	if (nfc_op.rdy_timeout_ms)
    795		ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
    796
    797	return ret;
    798}
    799
    800static int anfc_status_type_exec(struct nand_chip *chip,
    801				 const struct nand_subop *subop)
    802{
    803	struct arasan_nfc *nfc = to_anfc(chip->controller);
    804	u32 tmp;
    805	int ret;
    806
    807	/* See anfc_check_op() for details about this constraint */
    808	if (subop->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS)
    809		return -ENOTSUPP;
    810
    811	ret = anfc_misc_zerolen_type_exec(chip, subop, PROG_STATUS);
    812	if (ret)
    813		return ret;
    814
    815	tmp = readl_relaxed(nfc->base + FLASH_STS_REG);
    816	memcpy(subop->instrs[1].ctx.data.buf.in, &tmp, 1);
    817
    818	return 0;
    819}
    820
    821static int anfc_reset_type_exec(struct nand_chip *chip,
    822				const struct nand_subop *subop)
    823{
    824	return anfc_misc_zerolen_type_exec(chip, subop, PROG_RST);
    825}
    826
    827static int anfc_erase_type_exec(struct nand_chip *chip,
    828				const struct nand_subop *subop)
    829{
    830	return anfc_misc_zerolen_type_exec(chip, subop, PROG_ERASE);
    831}
    832
    833static int anfc_wait_type_exec(struct nand_chip *chip,
    834			       const struct nand_subop *subop)
    835{
    836	struct arasan_nfc *nfc = to_anfc(chip->controller);
    837	struct anfc_op nfc_op = {};
    838	int ret;
    839
    840	ret = anfc_parse_instructions(chip, subop, &nfc_op);
    841	if (ret)
    842		return ret;
    843
    844	return anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
    845}
    846
    847static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
    848	NAND_OP_PARSER_PATTERN(
    849		anfc_param_read_type_exec,
    850		NAND_OP_PARSER_PAT_CMD_ELEM(false),
    851		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
    852		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
    853		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
    854	NAND_OP_PARSER_PATTERN(
    855		anfc_param_write_type_exec,
    856		NAND_OP_PARSER_PAT_CMD_ELEM(false),
    857		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
    858		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_PARAM_SIZE)),
    859	NAND_OP_PARSER_PATTERN(
    860		anfc_data_read_type_exec,
    861		NAND_OP_PARSER_PAT_CMD_ELEM(false),
    862		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
    863		NAND_OP_PARSER_PAT_CMD_ELEM(false),
    864		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
    865		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, ANFC_MAX_CHUNK_SIZE)),
    866	NAND_OP_PARSER_PATTERN(
    867		anfc_data_write_type_exec,
    868		NAND_OP_PARSER_PAT_CMD_ELEM(false),
    869		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
    870		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
    871		NAND_OP_PARSER_PAT_CMD_ELEM(false)),
    872	NAND_OP_PARSER_PATTERN(
    873		anfc_reset_type_exec,
    874		NAND_OP_PARSER_PAT_CMD_ELEM(false),
    875		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
    876	NAND_OP_PARSER_PATTERN(
    877		anfc_erase_type_exec,
    878		NAND_OP_PARSER_PAT_CMD_ELEM(false),
    879		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
    880		NAND_OP_PARSER_PAT_CMD_ELEM(false),
    881		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
    882	NAND_OP_PARSER_PATTERN(
    883		anfc_status_type_exec,
    884		NAND_OP_PARSER_PAT_CMD_ELEM(false),
    885		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
    886	NAND_OP_PARSER_PATTERN(
    887		anfc_wait_type_exec,
    888		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
    889	);
    890
    891static int anfc_check_op(struct nand_chip *chip,
    892			 const struct nand_operation *op)
    893{
    894	const struct nand_op_instr *instr;
    895	int op_id;
    896
    897	/*
    898	 * The controller abstracts all the NAND operations and does not support
    899	 * data-only operations.
    900	 *
    901	 * TODO: The nand_op_parser framework should be extended to
    902	 * support custom checks on DATA instructions.
    903	 */
    904	for (op_id = 0; op_id < op->ninstrs; op_id++) {
    905		instr = &op->instrs[op_id];
    906
    907		switch (instr->type) {
    908		case NAND_OP_ADDR_INSTR:
    909			if (instr->ctx.addr.naddrs > ANFC_MAX_ADDR_CYC)
    910				return -ENOTSUPP;
    911
    912			break;
    913		case NAND_OP_DATA_IN_INSTR:
    914		case NAND_OP_DATA_OUT_INSTR:
    915			if (instr->ctx.data.len > ANFC_MAX_CHUNK_SIZE)
    916				return -ENOTSUPP;
    917
    918			if (anfc_pkt_len_config(instr->ctx.data.len, 0, 0))
    919				return -ENOTSUPP;
    920
    921			break;
    922		default:
    923			break;
    924		}
    925	}
    926
    927	/*
    928	 * The controller does not allow a CMD+DATA_IN cycle to be performed
    929	 * manually on the bus by reading data from the data register. Instead,
    930	 * the controller abstracts the status read: it latches the result of a
    931	 * READ STATUS operation in its own status register. Hence, we cannot
    932	 * support any CMD+DATA_IN operation other than a READ STATUS.
    933	 *
    934	 * TODO: The nand_op_parser() framework should be extended to describe
    935	 * fixed patterns instead of open-coding this check here.
    936	 */
    937	if (op->ninstrs == 2 &&
    938	    op->instrs[0].type == NAND_OP_CMD_INSTR &&
    939	    op->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS &&
    940	    op->instrs[1].type == NAND_OP_DATA_IN_INSTR)
    941		return -ENOTSUPP;
    942
    943	return nand_op_parser_exec_op(chip, &anfc_op_parser, op, true);
    944}
    945
    946static int anfc_exec_op(struct nand_chip *chip,
    947			const struct nand_operation *op,
    948			bool check_only)
    949{
    950	int ret;
    951
    952	if (check_only)
    953		return anfc_check_op(chip, op);
    954
    955	ret = anfc_select_target(chip, op->cs);
    956	if (ret)
    957		return ret;
    958
    959	return nand_op_parser_exec_op(chip, &anfc_op_parser, op, check_only);
    960}
    961
    962static int anfc_setup_interface(struct nand_chip *chip, int target,
    963				const struct nand_interface_config *conf)
    964{
    965	struct anand *anand = to_anand(chip);
    966	struct arasan_nfc *nfc = to_anfc(chip->controller);
    967	struct device_node *np = nfc->dev->of_node;
    968	const struct nand_sdr_timings *sdr;
    969	const struct nand_nvddr_timings *nvddr;
    970	unsigned int tccs_min, dqs_mode, fast_tcad;
    971
    972	if (nand_interface_is_nvddr(conf)) {
    973		nvddr = nand_get_nvddr_timings(conf);
    974		if (IS_ERR(nvddr))
    975			return PTR_ERR(nvddr);
    976
    977		/*
    978		 * The controller only supports data payload requests which are
    979		 * a multiple of 4. In practice, most data accesses are 4-byte
    980		 * aligned and this is not an issue. However, rounding up will
    981		 * simply be refused by the controller if we have reached the end
    982		 * of the device *and* we are using the NV-DDR interface(!). In
    983		 * this situation, unaligned data requests ending at the device
    984		 * boundary will confuse the controller and cannot be performed.
    985		 *
    986		 * This is something that happens in nand_read_subpage() when
    987		 * selecting software ECC support and must be avoided.
    988		 */
    989		if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT)
    990			return -ENOTSUPP;
    991	} else {
    992		sdr = nand_get_sdr_timings(conf);
    993		if (IS_ERR(sdr))
    994			return PTR_ERR(sdr);
    995	}
    996
    997	if (target < 0)
    998		return 0;
    999
   1000	if (nand_interface_is_sdr(conf)) {
   1001		anand->data_iface = DIFACE_SDR |
   1002				    DIFACE_SDR_MODE(conf->timings.mode);
   1003		anand->timings = 0;
   1004	} else {
   1005		anand->data_iface = DIFACE_NVDDR |
   1006				    DIFACE_DDR_MODE(conf->timings.mode);
   1007
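       		/*
       		 * Round the chip's minimum tCCS (given in picoseconds by the
       		 * core) up to the closest supported value: 100, 200, 300 or
       		 * 500 ns.
       		 */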
   1008		if (conf->timings.nvddr.tCCS_min <= 100000)
   1009			tccs_min = TCCS_TIME_100NS;
   1010		else if (conf->timings.nvddr.tCCS_min <= 200000)
   1011			tccs_min = TCCS_TIME_200NS;
   1012		else if (conf->timings.nvddr.tCCS_min <= 300000)
   1013			tccs_min = TCCS_TIME_300NS;
   1014		else
   1015			tccs_min = TCCS_TIME_500NS;
   1016
   1017		fast_tcad = 0;
   1018		if (conf->timings.nvddr.tCAD_min < 45000)
   1019			fast_tcad = FAST_TCAD;
   1020
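       		/*
       		 * The DQS buffer select value scales with the timing mode:
       		 * faster NV-DDR modes use a smaller select value (2 for modes
       		 * 4-5, up to 6 for mode 0).
       		 */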
   1021		switch (conf->timings.mode) {
   1022		case 5:
   1023		case 4:
   1024			dqs_mode = 2;
   1025			break;
   1026		case 3:
   1027			dqs_mode = 3;
   1028			break;
   1029		case 2:
   1030			dqs_mode = 4;
   1031			break;
   1032		case 1:
   1033			dqs_mode = 5;
   1034			break;
   1035		case 0:
   1036		default:
   1037			dqs_mode = 6;
   1038			break;
   1039		}
   1040
   1041		anand->timings = tccs_min | fast_tcad |
   1042				 DQS_BUFF_SEL_IN(dqs_mode) |
   1043				 DQS_BUFF_SEL_OUT(dqs_mode);
   1044	}
   1045
   1046	anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
   1047
   1048	/*
   1049	 * Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work
   1050	 * with f > 90MHz (default clock is 100MHz) but signals are unstable
   1051	 * with higher modes. Hence we slightly decrease the clock rate to
   1052	 * 80MHz when using SDR modes 2-5 with this SoC.
   1053	 */
   1054	if (of_device_is_compatible(np, "xlnx,zynqmp-nand-controller") &&
   1055	    nand_interface_is_sdr(conf) && conf->timings.mode >= 2)
   1056		anand->clk = ANFC_XLNX_SDR_HS_CORE_CLK;
   1057
   1058	return 0;
   1059}
   1060
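       /*
        * BCH over GF(2^13) protects 512-byte steps and GF(2^14) protects
        * 1 kiB steps, so each step needs strength * 13 (or 14) bits of ECC.
        * For example, 512-byte steps with a strength of 8 require 104 bits,
        * i.e. 13 bytes of ECC per step.
        */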
   1061static int anfc_calc_hw_ecc_bytes(int step_size, int strength)
   1062{
   1063	unsigned int bch_gf_mag, ecc_bits;
   1064
   1065	switch (step_size) {
   1066	case SZ_512:
   1067		bch_gf_mag = 13;
   1068		break;
   1069	case SZ_1K:
   1070		bch_gf_mag = 14;
   1071		break;
   1072	default:
   1073		return -EINVAL;
   1074	}
   1075
   1076	ecc_bits = bch_gf_mag * strength;
   1077
   1078	return DIV_ROUND_UP(ecc_bits, 8);
   1079}
   1080
   1081static const int anfc_hw_ecc_512_strengths[] = {4, 8, 12};
   1082
   1083static const int anfc_hw_ecc_1024_strengths[] = {24};
   1084
   1085static const struct nand_ecc_step_info anfc_hw_ecc_step_infos[] = {
   1086	{
   1087		.stepsize = SZ_512,
   1088		.strengths = anfc_hw_ecc_512_strengths,
   1089		.nstrengths = ARRAY_SIZE(anfc_hw_ecc_512_strengths),
   1090	},
   1091	{
   1092		.stepsize = SZ_1K,
   1093		.strengths = anfc_hw_ecc_1024_strengths,
   1094		.nstrengths = ARRAY_SIZE(anfc_hw_ecc_1024_strengths),
   1095	},
   1096};
   1097
   1098static const struct nand_ecc_caps anfc_hw_ecc_caps = {
   1099	.stepinfos = anfc_hw_ecc_step_infos,
   1100	.nstepinfos = ARRAY_SIZE(anfc_hw_ecc_step_infos),
   1101	.calc_ecc_bytes = anfc_calc_hw_ecc_bytes,
   1102};
   1103
   1104static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc,
   1105				       struct nand_chip *chip)
   1106{
   1107	struct anand *anand = to_anand(chip);
   1108	struct mtd_info *mtd = nand_to_mtd(chip);
   1109	struct nand_ecc_ctrl *ecc = &chip->ecc;
   1110	unsigned int bch_prim_poly = 0, bch_gf_mag = 0, ecc_offset;
   1111	int ret;
   1112
   1113	switch (mtd->writesize) {
   1114	case SZ_512:
   1115	case SZ_2K:
   1116	case SZ_4K:
   1117	case SZ_8K:
   1118	case SZ_16K:
   1119		break;
   1120	default:
   1121		dev_err(nfc->dev, "Unsupported page size %d\n", mtd->writesize);
   1122		return -EINVAL;
   1123	}
   1124
   1125	ret = nand_ecc_choose_conf(chip, &anfc_hw_ecc_caps, mtd->oobsize);
   1126	if (ret)
   1127		return ret;
   1128
   1129	switch (ecc->strength) {
   1130	case 12:
   1131		anand->strength = 0x1;
   1132		break;
   1133	case 8:
   1134		anand->strength = 0x2;
   1135		break;
   1136	case 4:
   1137		anand->strength = 0x3;
   1138		break;
   1139	case 24:
   1140		anand->strength = 0x4;
   1141		break;
   1142	default:
   1143		dev_err(nfc->dev, "Unsupported strength %d\n", ecc->strength);
   1144		return -EINVAL;
   1145	}
   1146
   1147	switch (ecc->size) {
   1148	case SZ_512:
   1149		bch_gf_mag = 13;
   1150		bch_prim_poly = 0x201b;
   1151		break;
   1152	case SZ_1K:
   1153		bch_gf_mag = 14;
   1154		bch_prim_poly = 0x4443;
   1155		break;
   1156	default:
   1157		dev_err(nfc->dev, "Unsupported step size %d\n", ecc->size);
   1158		return -EINVAL;
   1159	}
   1160
   1161	mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
   1162
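       	/*
       	 * ECC bytes are packed at the very end of the OOB area: ECC_CONF_COL
       	 * points at the first ECC byte (page size + OOB size minus the total
       	 * ECC length) and ECC_CONF_LEN covers the ECC bytes of all the steps
       	 * of the page.
       	 */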
   1163	ecc->steps = mtd->writesize / ecc->size;
   1164	ecc->algo = NAND_ECC_ALGO_BCH;
   1165	anand->ecc_bits = bch_gf_mag * ecc->strength;
   1166	ecc->bytes = DIV_ROUND_UP(anand->ecc_bits, 8);
   1167	anand->ecc_total = DIV_ROUND_UP(anand->ecc_bits * ecc->steps, 8);
   1168	ecc_offset = mtd->writesize + mtd->oobsize - anand->ecc_total;
   1169	anand->ecc_conf = ECC_CONF_COL(ecc_offset) |
   1170			  ECC_CONF_LEN(anand->ecc_total) |
   1171			  ECC_CONF_BCH_EN;
   1172
   1173	anand->errloc = devm_kmalloc_array(nfc->dev, ecc->strength,
   1174					   sizeof(*anand->errloc), GFP_KERNEL);
   1175	if (!anand->errloc)
   1176		return -ENOMEM;
   1177
   1178	anand->hw_ecc = devm_kmalloc(nfc->dev, ecc->bytes, GFP_KERNEL);
   1179	if (!anand->hw_ecc)
   1180		return -ENOMEM;
   1181
   1182	/* Enforce bit swapping to fit the hardware */
   1183	anand->bch = bch_init(bch_gf_mag, ecc->strength, bch_prim_poly, true);
   1184	if (!anand->bch)
   1185		return -EINVAL;
   1186
   1187	ecc->read_page = anfc_sel_read_page_hw_ecc;
   1188	ecc->write_page = anfc_sel_write_page_hw_ecc;
   1189
   1190	return 0;
   1191}
   1192
   1193static int anfc_attach_chip(struct nand_chip *chip)
   1194{
   1195	struct anand *anand = to_anand(chip);
   1196	struct arasan_nfc *nfc = to_anfc(chip->controller);
   1197	struct mtd_info *mtd = nand_to_mtd(chip);
   1198	int ret = 0;
   1199
   1200	if (mtd->writesize <= SZ_512)
   1201		anand->caddr_cycles = 1;
   1202	else
   1203		anand->caddr_cycles = 2;
   1204
   1205	if (chip->options & NAND_ROW_ADDR_3)
   1206		anand->raddr_cycles = 3;
   1207	else
   1208		anand->raddr_cycles = 2;
   1209
   1210	switch (mtd->writesize) {
   1211	case 512:
   1212		anand->page_sz = 0;
   1213		break;
   1214	case 1024:
   1215		anand->page_sz = 5;
   1216		break;
   1217	case 2048:
   1218		anand->page_sz = 1;
   1219		break;
   1220	case 4096:
   1221		anand->page_sz = 2;
   1222		break;
   1223	case 8192:
   1224		anand->page_sz = 3;
   1225		break;
   1226	case 16384:
   1227		anand->page_sz = 4;
   1228		break;
   1229	default:
   1230		return -EINVAL;
   1231	}
   1232
   1233	/* These hooks are valid for all ECC providers */
   1234	chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
   1235	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
   1236
   1237	switch (chip->ecc.engine_type) {
   1238	case NAND_ECC_ENGINE_TYPE_NONE:
   1239	case NAND_ECC_ENGINE_TYPE_SOFT:
   1240	case NAND_ECC_ENGINE_TYPE_ON_DIE:
   1241		break;
   1242	case NAND_ECC_ENGINE_TYPE_ON_HOST:
   1243		ret = anfc_init_hw_ecc_controller(nfc, chip);
   1244		break;
   1245	default:
   1246		dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
   1247			chip->ecc.engine_type);
   1248		return -EINVAL;
   1249	}
   1250
   1251	return ret;
   1252}
   1253
   1254static void anfc_detach_chip(struct nand_chip *chip)
   1255{
   1256	struct anand *anand = to_anand(chip);
   1257
   1258	if (anand->bch)
   1259		bch_free(anand->bch);
   1260}
   1261
   1262static const struct nand_controller_ops anfc_ops = {
   1263	.exec_op = anfc_exec_op,
   1264	.setup_interface = anfc_setup_interface,
   1265	.attach_chip = anfc_attach_chip,
   1266	.detach_chip = anfc_detach_chip,
   1267};
   1268
   1269static int anfc_chip_init(struct arasan_nfc *nfc, struct device_node *np)
   1270{
   1271	struct anand *anand;
   1272	struct nand_chip *chip;
   1273	struct mtd_info *mtd;
   1274	int rb, ret, i;
   1275
   1276	anand = devm_kzalloc(nfc->dev, sizeof(*anand), GFP_KERNEL);
   1277	if (!anand)
   1278		return -ENOMEM;
   1279
   1280	/* Chip-select init */
   1281	anand->ncs_idx = of_property_count_elems_of_size(np, "reg", sizeof(u32));
   1282	if (anand->ncs_idx <= 0 || anand->ncs_idx > nfc->ncs) {
   1283		dev_err(nfc->dev, "Invalid reg property\n");
   1284		return -EINVAL;
   1285	}
   1286
   1287	anand->cs_idx = devm_kcalloc(nfc->dev, anand->ncs_idx,
   1288				     sizeof(*anand->cs_idx), GFP_KERNEL);
   1289	if (!anand->cs_idx)
   1290		return -ENOMEM;
   1291
   1292	for (i = 0; i < anand->ncs_idx; i++) {
   1293		ret = of_property_read_u32_index(np, "reg", i,
   1294						 &anand->cs_idx[i]);
   1295		if (ret) {
   1296			dev_err(nfc->dev, "invalid CS property: %d\n", ret);
   1297			return ret;
   1298		}
   1299	}
   1300
   1301	/* Ready-busy init */
   1302	ret = of_property_read_u32(np, "nand-rb", &rb);
   1303	if (ret)
   1304		return ret;
   1305
   1306	if (rb >= ANFC_MAX_CS) {
   1307		dev_err(nfc->dev, "Wrong RB %d\n", rb);
   1308		return -EINVAL;
   1309	}
   1310
   1311	anand->rb = rb;
   1312
   1313	chip = &anand->chip;
   1314	mtd = nand_to_mtd(chip);
   1315	mtd->dev.parent = nfc->dev;
   1316	chip->controller = &nfc->controller;
   1317	chip->options = NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
   1318			NAND_USES_DMA;
   1319
   1320	nand_set_flash_node(chip, np);
   1321	if (!mtd->name) {
   1322		dev_err(nfc->dev, "NAND label property is mandatory\n");
   1323		return -EINVAL;
   1324	}
   1325
   1326	ret = nand_scan(chip, anand->ncs_idx);
   1327	if (ret) {
   1328		dev_err(nfc->dev, "Scan operation failed\n");
   1329		return ret;
   1330	}
   1331
   1332	ret = mtd_device_register(mtd, NULL, 0);
   1333	if (ret) {
   1334		nand_cleanup(chip);
   1335		return ret;
   1336	}
   1337
   1338	list_add_tail(&anand->node, &nfc->chips);
   1339
   1340	return 0;
   1341}
   1342
   1343static void anfc_chips_cleanup(struct arasan_nfc *nfc)
   1344{
   1345	struct anand *anand, *tmp;
   1346	struct nand_chip *chip;
   1347	int ret;
   1348
   1349	list_for_each_entry_safe(anand, tmp, &nfc->chips, node) {
   1350		chip = &anand->chip;
   1351		ret = mtd_device_unregister(nand_to_mtd(chip));
   1352		WARN_ON(ret);
   1353		nand_cleanup(chip);
   1354		list_del(&anand->node);
   1355	}
   1356}
   1357
   1358static int anfc_chips_init(struct arasan_nfc *nfc)
   1359{
   1360	struct device_node *np = nfc->dev->of_node, *nand_np;
   1361	int nchips = of_get_child_count(np);
   1362	int ret;
   1363
   1364	if (!nchips) {
   1365		dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
   1366			nchips);
   1367		return -EINVAL;
   1368	}
   1369
   1370	for_each_child_of_node(np, nand_np) {
   1371		ret = anfc_chip_init(nfc, nand_np);
   1372		if (ret) {
   1373			of_node_put(nand_np);
   1374			anfc_chips_cleanup(nfc);
   1375			break;
   1376		}
   1377	}
   1378
   1379	return ret;
   1380}
   1381
   1382static void anfc_reset(struct arasan_nfc *nfc)
   1383{
   1384	/* Disable interrupt signals */
   1385	writel_relaxed(0, nfc->base + INTR_SIG_EN_REG);
   1386
   1387	/* Enable interrupt status */
   1388	writel_relaxed(EVENT_MASK, nfc->base + INTR_STS_EN_REG);
   1389
   1390	nfc->cur_cs = -1;
   1391}
   1392
   1393static int anfc_parse_cs(struct arasan_nfc *nfc)
   1394{
   1395	int ret;
   1396
   1397	/* Check the gpio-cs property */
   1398	ret = rawnand_dt_parse_gpio_cs(nfc->dev, &nfc->cs_array, &nfc->ncs);
   1399	if (ret)
   1400		return ret;
   1401
   1402	/*
   1403	 * The two controller native CS cannot both be disabled at the same
   1404	 * time. Hence, if GPIO CS are needed, only one native CS can be used:
   1405	 * the other one is driven whenever a non-native CS must be asserted
   1406	 * (i.e. a CS that is not physically wired or that is configured as a
   1407	 * GPIO instead of a NAND CS). That spare native CS is stored in
   1408	 * nfc->spare_cs and selected whenever a GPIO CS is in use.
   1409	 */
   1410	if (nfc->cs_array && nfc->ncs > 2) {
   1411		if (!nfc->cs_array[0] && !nfc->cs_array[1]) {
   1412			dev_err(nfc->dev,
   1413				"Assign a single native CS when using GPIOs\n");
   1414			return -EINVAL;
   1415		}
   1416
   1417		if (nfc->cs_array[0])
   1418			nfc->spare_cs = 0;
   1419		else
   1420			nfc->spare_cs = 1;
   1421	}
   1422
   1423	if (!nfc->cs_array) {
   1424		nfc->cs_array = anfc_default_cs_array;
   1425		nfc->ncs = ANFC_MAX_CS;
   1426		return 0;
   1427	}
   1428
   1429	return 0;
   1430}
   1431
   1432static int anfc_probe(struct platform_device *pdev)
   1433{
   1434	struct arasan_nfc *nfc;
   1435	int ret;
   1436
   1437	nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
   1438	if (!nfc)
   1439		return -ENOMEM;
   1440
   1441	nfc->dev = &pdev->dev;
   1442	nand_controller_init(&nfc->controller);
   1443	nfc->controller.ops = &anfc_ops;
   1444	INIT_LIST_HEAD(&nfc->chips);
   1445
   1446	nfc->base = devm_platform_ioremap_resource(pdev, 0);
   1447	if (IS_ERR(nfc->base))
   1448		return PTR_ERR(nfc->base);
   1449
   1450	anfc_reset(nfc);
   1451
   1452	nfc->controller_clk = devm_clk_get(&pdev->dev, "controller");
   1453	if (IS_ERR(nfc->controller_clk))
   1454		return PTR_ERR(nfc->controller_clk);
   1455
   1456	nfc->bus_clk = devm_clk_get(&pdev->dev, "bus");
   1457	if (IS_ERR(nfc->bus_clk))
   1458		return PTR_ERR(nfc->bus_clk);
   1459
   1460	ret = clk_prepare_enable(nfc->controller_clk);
   1461	if (ret)
   1462		return ret;
   1463
   1464	ret = clk_prepare_enable(nfc->bus_clk);
   1465	if (ret)
   1466		goto disable_controller_clk;
   1467
   1468	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
   1469	if (ret)
   1470		goto disable_bus_clk;
   1471
   1472	ret = anfc_parse_cs(nfc);
   1473	if (ret)
   1474		goto disable_bus_clk;
   1475
   1476	ret = anfc_chips_init(nfc);
   1477	if (ret)
   1478		goto disable_bus_clk;
   1479
   1480	platform_set_drvdata(pdev, nfc);
   1481
   1482	return 0;
   1483
   1484disable_bus_clk:
   1485	clk_disable_unprepare(nfc->bus_clk);
   1486
   1487disable_controller_clk:
   1488	clk_disable_unprepare(nfc->controller_clk);
   1489
   1490	return ret;
   1491}
   1492
   1493static int anfc_remove(struct platform_device *pdev)
   1494{
   1495	struct arasan_nfc *nfc = platform_get_drvdata(pdev);
   1496
   1497	anfc_chips_cleanup(nfc);
   1498
   1499	clk_disable_unprepare(nfc->bus_clk);
   1500	clk_disable_unprepare(nfc->controller_clk);
   1501
   1502	return 0;
   1503}
   1504
   1505static const struct of_device_id anfc_ids[] = {
   1506	{
   1507		.compatible = "xlnx,zynqmp-nand-controller",
   1508	},
   1509	{
   1510		.compatible = "arasan,nfc-v3p10",
   1511	},
   1512	{}
   1513};
   1514MODULE_DEVICE_TABLE(of, anfc_ids);
   1515
   1516static struct platform_driver anfc_driver = {
   1517	.driver = {
   1518		.name = "arasan-nand-controller",
   1519		.of_match_table = anfc_ids,
   1520	},
   1521	.probe = anfc_probe,
   1522	.remove = anfc_remove,
   1523};
   1524module_platform_driver(anfc_driver);
   1525
   1526MODULE_LICENSE("GPL v2");
   1527MODULE_AUTHOR("Punnaiah Choudary Kalluri <punnaia@xilinx.com>");
   1528MODULE_AUTHOR("Naga Sureshkumar Relli <nagasure@xilinx.com>");
   1529MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
   1530MODULE_DESCRIPTION("Arasan NAND Flash Controller Driver");