cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

lpc32xx_slc.c (29168B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * NXP LPC32XX NAND SLC driver
      4 *
      5 * Authors:
      6 *    Kevin Wells <kevin.wells@nxp.com>
      7 *    Roland Stigge <stigge@antcom.de>
      8 *
      9 * Copyright © 2011 NXP Semiconductors
     10 * Copyright © 2012 Roland Stigge
     11 */
     12
     13#include <linux/slab.h>
     14#include <linux/module.h>
     15#include <linux/platform_device.h>
     16#include <linux/mtd/mtd.h>
     17#include <linux/mtd/rawnand.h>
     18#include <linux/mtd/partitions.h>
     19#include <linux/clk.h>
     20#include <linux/err.h>
     21#include <linux/delay.h>
     22#include <linux/io.h>
     23#include <linux/mm.h>
     24#include <linux/dma-mapping.h>
     25#include <linux/dmaengine.h>
     26#include <linux/gpio.h>
     27#include <linux/of.h>
     28#include <linux/of_gpio.h>
     29#include <linux/mtd/lpc32xx_slc.h>
     30
     31#define LPC32XX_MODNAME		"lpc32xx-nand"
     32
     33/**********************************************************************
     34* SLC NAND controller register offsets
     35**********************************************************************/
     36
     37#define SLC_DATA(x)		(x + 0x000)
     38#define SLC_ADDR(x)		(x + 0x004)
     39#define SLC_CMD(x)		(x + 0x008)
     40#define SLC_STOP(x)		(x + 0x00C)
     41#define SLC_CTRL(x)		(x + 0x010)
     42#define SLC_CFG(x)		(x + 0x014)
     43#define SLC_STAT(x)		(x + 0x018)
     44#define SLC_INT_STAT(x)		(x + 0x01C)
     45#define SLC_IEN(x)		(x + 0x020)
     46#define SLC_ISR(x)		(x + 0x024)
     47#define SLC_ICR(x)		(x + 0x028)
     48#define SLC_TAC(x)		(x + 0x02C)
     49#define SLC_TC(x)		(x + 0x030)
     50#define SLC_ECC(x)		(x + 0x034)
     51#define SLC_DMA_DATA(x)		(x + 0x038)
     52
     53/**********************************************************************
     54* slc_ctrl register definitions
     55**********************************************************************/
     56#define SLCCTRL_SW_RESET	(1 << 2) /* Reset the NAND controller bit */
     57#define SLCCTRL_ECC_CLEAR	(1 << 1) /* Reset ECC bit */
     58#define SLCCTRL_DMA_START	(1 << 0) /* Start DMA channel bit */
     59
     60/**********************************************************************
     61* slc_cfg register definitions
     62**********************************************************************/
     63#define SLCCFG_CE_LOW		(1 << 5) /* Force CE low bit */
     64#define SLCCFG_DMA_ECC		(1 << 4) /* Enable DMA ECC bit */
     65#define SLCCFG_ECC_EN		(1 << 3) /* ECC enable bit */
     66#define SLCCFG_DMA_BURST	(1 << 2) /* DMA burst bit */
     67#define SLCCFG_DMA_DIR		(1 << 1) /* DMA write(0)/read(1) bit */
     68#define SLCCFG_WIDTH		(1 << 0) /* External device width, 0=8bit */
     69
     70/**********************************************************************
     71* slc_stat register definitions
     72**********************************************************************/
     73#define SLCSTAT_DMA_FIFO	(1 << 2) /* DMA FIFO has data bit */
     74#define SLCSTAT_SLC_FIFO	(1 << 1) /* SLC FIFO has data bit */
     75#define SLCSTAT_NAND_READY	(1 << 0) /* NAND device is ready bit */
     76
     77/**********************************************************************
     78* slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
     79**********************************************************************/
     80#define SLCSTAT_INT_TC		(1 << 1) /* Transfer count bit */
     81#define SLCSTAT_INT_RDY_EN	(1 << 0) /* Ready interrupt bit */
     82
     83/**********************************************************************
     84* slc_tac register definitions
     85**********************************************************************/
      86/* Computation of clock cycles based on controller and device clock rates */
     87#define SLCTAC_CLOCKS(c, n, s)	(min_t(u32, DIV_ROUND_UP(c, n) - 1, 0xF) << s)
     88
     89/* Clock setting for RDY write sample wait time in 2*n clocks */
     90#define SLCTAC_WDR(n)		(((n) & 0xF) << 28)
     91/* Write pulse width in clock cycles, 1 to 16 clocks */
     92#define SLCTAC_WWIDTH(c, n)	(SLCTAC_CLOCKS(c, n, 24))
     93/* Write hold time of control and data signals, 1 to 16 clocks */
     94#define SLCTAC_WHOLD(c, n)	(SLCTAC_CLOCKS(c, n, 20))
     95/* Write setup time of control and data signals, 1 to 16 clocks */
     96#define SLCTAC_WSETUP(c, n)	(SLCTAC_CLOCKS(c, n, 16))
     97/* Clock setting for RDY read sample wait time in 2*n clocks */
     98#define SLCTAC_RDR(n)		(((n) & 0xF) << 12)
     99/* Read pulse width in clock cycles, 1 to 16 clocks */
    100#define SLCTAC_RWIDTH(c, n)	(SLCTAC_CLOCKS(c, n, 8))
    101/* Read hold time of control and data signals, 1 to 16 clocks */
    102#define SLCTAC_RHOLD(c, n)	(SLCTAC_CLOCKS(c, n, 4))
    103/* Read setup time of control and data signals, 1 to 16 clocks */
    104#define SLCTAC_RSETUP(c, n)	(SLCTAC_CLOCKS(c, n, 0))
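/*
 * Illustrative note (not part of the original driver): a worked example of
 * the timing computation above, assuming the default bus rate defined below
 * (133250000 Hz) and an assumed device tree value nxp,wwidth = 40000000,
 * i.e. a requested write pulse of 25 ns:
 *
 *   SLCTAC_WWIDTH(133250000, 40000000)
 *     = min(DIV_ROUND_UP(133250000, 40000000) - 1, 0xF) << 24
 *     = min(4 - 1, 0xF) << 24
 *     = 3 << 24
 *
 * The 4-bit field presumably encodes "value + 1" controller clocks, so 3
 * means 4 clocks (about 30 ns at 133 MHz), covering the requested 25 ns.
 */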
    105
    106/**********************************************************************
    107* slc_ecc register definitions
    108**********************************************************************/
     109/* ECC line parity fetch macro */
    110#define SLCECC_TO_LINEPAR(n)	(((n) >> 6) & 0x7FFF)
    111#define SLCECC_TO_COLPAR(n)	((n) & 0x3F)
    112
    113/*
    114 * DMA requires storage space for the DMA local buffer and the hardware ECC
    115 * storage area. The DMA local buffer is only used if DMA mapping fails
    116 * during runtime.
    117 */
    118#define LPC32XX_DMA_DATA_SIZE		4096
    119#define LPC32XX_ECC_SAVE_SIZE		((4096 / 256) * 4)
    120
     121/* Number of ECC bytes stored in NAND per 256 bytes of data */
    122#define LPC32XX_SLC_DEV_ECC_BYTES	3
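/*
 * Illustrative note (not part of the original driver): the ECC save area
 * above works out to (4096 / 256) * 4 = 64 bytes, i.e. one 32-bit ECC word
 * per 256-byte subpage of the largest supported (4 KiB) page, while only
 * LPC32XX_SLC_DEV_ECC_BYTES (3) bytes of each word are written to the NAND
 * spare area.
 */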
    123
    124/*
    125 * If the NAND base clock frequency can't be fetched, this frequency will be
    126 * used instead as the base. This rate is used to setup the timing registers
    127 * used for NAND accesses.
    128 */
    129#define LPC32XX_DEF_BUS_RATE		133250000
    130
    131/* Milliseconds for DMA FIFO timeout (unlikely anyway) */
    132#define LPC32XX_DMA_TIMEOUT		100
    133
    134/*
    135 * NAND ECC Layout for small page NAND devices
    136 * Note: For large and huge page devices, the default layouts are used
    137 */
    138static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
    139				 struct mtd_oob_region *oobregion)
    140{
    141	if (section)
    142		return -ERANGE;
    143
    144	oobregion->length = 6;
    145	oobregion->offset = 10;
    146
    147	return 0;
    148}
    149
    150static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
    151				  struct mtd_oob_region *oobregion)
    152{
    153	if (section > 1)
    154		return -ERANGE;
    155
    156	if (!section) {
    157		oobregion->offset = 0;
    158		oobregion->length = 4;
    159	} else {
    160		oobregion->offset = 6;
    161		oobregion->length = 4;
    162	}
    163
    164	return 0;
    165}
    166
    167static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
    168	.ecc = lpc32xx_ooblayout_ecc,
    169	.free = lpc32xx_ooblayout_free,
    170};
    171
    172static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
    173static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
    174
    175/*
    176 * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
     177 * Note: Large page devices use the default layout
    178 */
    179static struct nand_bbt_descr bbt_smallpage_main_descr = {
    180	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
    181		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
    182	.offs =	0,
    183	.len = 4,
    184	.veroffs = 6,
    185	.maxblocks = 4,
    186	.pattern = bbt_pattern
    187};
    188
    189static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
    190	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
    191		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
    192	.offs =	0,
    193	.len = 4,
    194	.veroffs = 6,
    195	.maxblocks = 4,
    196	.pattern = mirror_pattern
    197};
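/*
 * Illustrative summary (not part of the original driver), assuming the usual
 * 16-byte OOB area of small page (512 byte) NAND: combining the OOB layout
 * callbacks above with these BBT descriptors gives roughly
 *
 *   bytes  0..3   free / BBT marker ("Bbt0" or "1tbB")
 *   bytes  4..5   skipped (byte 5 is the conventional small-page factory
 *                 bad block marker)
 *   bytes  6..9   free, with byte 6 doubling as the BBT version
 *   bytes 10..15  hardware ECC (3 bytes per 256-byte subpage, 2 subpages)
 *
 * Large and huge page devices keep the MTD defaults instead.
 */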
    198
    199/*
    200 * NAND platform configuration structure
    201 */
    202struct lpc32xx_nand_cfg_slc {
    203	uint32_t wdr_clks;
    204	uint32_t wwidth;
    205	uint32_t whold;
    206	uint32_t wsetup;
    207	uint32_t rdr_clks;
    208	uint32_t rwidth;
    209	uint32_t rhold;
    210	uint32_t rsetup;
    211	int wp_gpio;
    212	struct mtd_partition *parts;
    213	unsigned num_parts;
    214};
    215
    216struct lpc32xx_nand_host {
    217	struct nand_chip	nand_chip;
    218	struct lpc32xx_slc_platform_data *pdata;
    219	struct clk		*clk;
    220	void __iomem		*io_base;
    221	struct lpc32xx_nand_cfg_slc *ncfg;
    222
    223	struct completion	comp;
    224	struct dma_chan		*dma_chan;
    225	uint32_t		dma_buf_len;
    226	struct dma_slave_config	dma_slave_config;
    227	struct scatterlist	sgl;
    228
    229	/*
    230	 * DMA and CPU addresses of ECC work area and data buffer
    231	 */
    232	uint32_t		*ecc_buf;
    233	uint8_t			*data_buf;
    234	dma_addr_t		io_base_dma;
    235};
    236
    237static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
    238{
    239	uint32_t clkrate, tmp;
    240
    241	/* Reset SLC controller */
    242	writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
    243	udelay(1000);
    244
    245	/* Basic setup */
    246	writel(0, SLC_CFG(host->io_base));
    247	writel(0, SLC_IEN(host->io_base));
    248	writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
    249		SLC_ICR(host->io_base));
    250
    251	/* Get base clock for SLC block */
    252	clkrate = clk_get_rate(host->clk);
    253	if (clkrate == 0)
    254		clkrate = LPC32XX_DEF_BUS_RATE;
    255
    256	/* Compute clock setup values */
    257	tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
    258		SLCTAC_WWIDTH(clkrate, host->ncfg->wwidth) |
    259		SLCTAC_WHOLD(clkrate, host->ncfg->whold) |
    260		SLCTAC_WSETUP(clkrate, host->ncfg->wsetup) |
    261		SLCTAC_RDR(host->ncfg->rdr_clks) |
    262		SLCTAC_RWIDTH(clkrate, host->ncfg->rwidth) |
    263		SLCTAC_RHOLD(clkrate, host->ncfg->rhold) |
    264		SLCTAC_RSETUP(clkrate, host->ncfg->rsetup);
    265	writel(tmp, SLC_TAC(host->io_base));
    266}
    267
    268/*
    269 * Hardware specific access to control lines
    270 */
    271static void lpc32xx_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
    272				  unsigned int ctrl)
    273{
    274	uint32_t tmp;
    275	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
    276
    277	/* Does CE state need to be changed? */
    278	tmp = readl(SLC_CFG(host->io_base));
    279	if (ctrl & NAND_NCE)
    280		tmp |= SLCCFG_CE_LOW;
    281	else
    282		tmp &= ~SLCCFG_CE_LOW;
    283	writel(tmp, SLC_CFG(host->io_base));
    284
    285	if (cmd != NAND_CMD_NONE) {
    286		if (ctrl & NAND_CLE)
    287			writel(cmd, SLC_CMD(host->io_base));
    288		else
    289			writel(cmd, SLC_ADDR(host->io_base));
    290	}
    291}
    292
    293/*
    294 * Read the Device Ready pin
    295 */
    296static int lpc32xx_nand_device_ready(struct nand_chip *chip)
    297{
    298	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
    299	int rdy = 0;
    300
    301	if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
    302		rdy = 1;
    303
    304	return rdy;
    305}
    306
    307/*
    308 * Enable NAND write protect
    309 */
    310static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
    311{
    312	if (gpio_is_valid(host->ncfg->wp_gpio))
    313		gpio_set_value(host->ncfg->wp_gpio, 0);
    314}
    315
    316/*
    317 * Disable NAND write protect
    318 */
    319static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
    320{
    321	if (gpio_is_valid(host->ncfg->wp_gpio))
    322		gpio_set_value(host->ncfg->wp_gpio, 1);
    323}
    324
    325/*
    326 * Prepares SLC for transfers with H/W ECC enabled
    327 */
    328static void lpc32xx_nand_ecc_enable(struct nand_chip *chip, int mode)
    329{
     330	/* ECC is enabled automatically by the hardware as needed */
    331}
    332
    333/*
    334 * Calculates the ECC for the data
    335 */
    336static int lpc32xx_nand_ecc_calculate(struct nand_chip *chip,
    337				      const unsigned char *buf,
    338				      unsigned char *code)
    339{
    340	/*
    341	 * ECC is calculated automatically in hardware during syndrome read
    342	 * and write operations, so it doesn't need to be calculated here.
    343	 */
    344	return 0;
    345}
    346
    347/*
    348 * Read a single byte from NAND device
    349 */
    350static uint8_t lpc32xx_nand_read_byte(struct nand_chip *chip)
    351{
    352	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
    353
    354	return (uint8_t)readl(SLC_DATA(host->io_base));
    355}
    356
    357/*
    358 * Simple device read without ECC
    359 */
    360static void lpc32xx_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
    361{
    362	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
    363
    364	/* Direct device read with no ECC */
    365	while (len-- > 0)
    366		*buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
    367}
    368
    369/*
    370 * Simple device write without ECC
    371 */
    372static void lpc32xx_nand_write_buf(struct nand_chip *chip, const uint8_t *buf,
    373				   int len)
    374{
    375	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
    376
    377	/* Direct device write with no ECC */
    378	while (len-- > 0)
    379		writel((uint32_t)*buf++, SLC_DATA(host->io_base));
    380}
    381
    382/*
    383 * Read the OOB data from the device without ECC using FIFO method
    384 */
    385static int lpc32xx_nand_read_oob_syndrome(struct nand_chip *chip, int page)
    386{
    387	struct mtd_info *mtd = nand_to_mtd(chip);
    388
    389	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
    390}
    391
    392/*
    393 * Write the OOB data to the device without ECC using FIFO method
    394 */
    395static int lpc32xx_nand_write_oob_syndrome(struct nand_chip *chip, int page)
    396{
    397	struct mtd_info *mtd = nand_to_mtd(chip);
    398
    399	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
    400				 mtd->oobsize);
    401}
    402
    403/*
    404 * Fills in the ECC fields in the OOB buffer with the hardware generated ECC
    405 */
    406static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
    407{
    408	int i;
    409
    410	for (i = 0; i < (count * 3); i += 3) {
    411		uint32_t ce = ecc[i / 3];
    412		ce = ~(ce << 2) & 0xFFFFFF;
    413		spare[i + 2] = (uint8_t)(ce & 0xFF);
    414		ce >>= 8;
    415		spare[i + 1] = (uint8_t)(ce & 0xFF);
    416		ce >>= 8;
    417		spare[i] = (uint8_t)(ce & 0xFF);
    418	}
    419}
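/*
 * Illustrative example (not part of the original driver): for an assumed
 * hardware ECC word ecc[0] = 0x00123456, the transform above gives
 *
 *   ce = ~(0x123456 << 2) & 0xFFFFFF = ~0x48D158 & 0xFFFFFF = 0xB72EA7
 *
 * so the three spare bytes are stored most significant byte first:
 *
 *   spare[0] = 0xB7, spare[1] = 0x2E, spare[2] = 0xA7
 */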
    420
    421static void lpc32xx_dma_complete_func(void *completion)
    422{
    423	complete(completion);
    424}
    425
    426static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
    427			    void *mem, int len, enum dma_transfer_direction dir)
    428{
    429	struct nand_chip *chip = mtd_to_nand(mtd);
    430	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
    431	struct dma_async_tx_descriptor *desc;
    432	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
    433	int res;
    434
    435	host->dma_slave_config.direction = dir;
    436	host->dma_slave_config.src_addr = dma;
    437	host->dma_slave_config.dst_addr = dma;
    438	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    439	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    440	host->dma_slave_config.src_maxburst = 4;
    441	host->dma_slave_config.dst_maxburst = 4;
    442	/* DMA controller does flow control: */
    443	host->dma_slave_config.device_fc = false;
    444	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
    445		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
    446		return -ENXIO;
    447	}
    448
    449	sg_init_one(&host->sgl, mem, len);
    450
    451	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
    452			 DMA_BIDIRECTIONAL);
    453	if (res != 1) {
    454		dev_err(mtd->dev.parent, "Failed to map sg list\n");
    455		return -ENXIO;
    456	}
    457	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
    458				       flags);
    459	if (!desc) {
    460		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
    461		goto out1;
    462	}
    463
    464	init_completion(&host->comp);
    465	desc->callback = lpc32xx_dma_complete_func;
    466	desc->callback_param = &host->comp;
    467
    468	dmaengine_submit(desc);
    469	dma_async_issue_pending(host->dma_chan);
    470
    471	wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));
    472
    473	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
    474		     DMA_BIDIRECTIONAL);
    475
    476	return 0;
    477out1:
    478	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
    479		     DMA_BIDIRECTIONAL);
    480	return -ENXIO;
    481}
    482
    483/*
    484 * DMA read/write transfers with ECC support
    485 */
    486static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
    487			int read)
    488{
    489	struct nand_chip *chip = mtd_to_nand(mtd);
    490	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
    491	int i, status = 0;
    492	unsigned long timeout;
    493	int res;
    494	enum dma_transfer_direction dir =
    495		read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
    496	uint8_t *dma_buf;
    497	bool dma_mapped;
    498
    499	if ((void *)buf <= high_memory) {
    500		dma_buf = buf;
    501		dma_mapped = true;
    502	} else {
    503		dma_buf = host->data_buf;
    504		dma_mapped = false;
    505		if (!read)
    506			memcpy(host->data_buf, buf, mtd->writesize);
    507	}
    508
    509	if (read) {
    510		writel(readl(SLC_CFG(host->io_base)) |
    511		       SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
    512		       SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
    513	} else {
    514		writel((readl(SLC_CFG(host->io_base)) |
    515			SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
    516		       ~SLCCFG_DMA_DIR,
    517			SLC_CFG(host->io_base));
    518	}
    519
    520	/* Clear initial ECC */
    521	writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));
    522
    523	/* Transfer size is data area only */
    524	writel(mtd->writesize, SLC_TC(host->io_base));
    525
    526	/* Start transfer in the NAND controller */
    527	writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
    528	       SLC_CTRL(host->io_base));
    529
    530	for (i = 0; i < chip->ecc.steps; i++) {
    531		/* Data */
    532		res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
    533				       dma_buf + i * chip->ecc.size,
    534				       mtd->writesize / chip->ecc.steps, dir);
    535		if (res)
    536			return res;
    537
    538		/* Always _read_ ECC */
    539		if (i == chip->ecc.steps - 1)
    540			break;
    541		if (!read) /* ECC availability delayed on write */
    542			udelay(10);
    543		res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
    544				       &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
    545		if (res)
    546			return res;
    547	}
    548
    549	/*
    550	 * According to NXP, the DMA can be finished here, but the NAND
    551	 * controller may still have buffered data. After porting to using the
    552	 * dmaengine DMA driver (amba-pl080), the condition (DMA_FIFO empty)
    553	 * appears to be always true, according to tests. Keeping the check for
    554	 * safety reasons for now.
    555	 */
    556	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
    557		dev_warn(mtd->dev.parent, "FIFO not empty!\n");
    558		timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
    559		while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
    560		       time_before(jiffies, timeout))
    561			cpu_relax();
    562		if (!time_before(jiffies, timeout)) {
    563			dev_err(mtd->dev.parent, "FIFO held data too long\n");
    564			status = -EIO;
    565		}
    566	}
    567
    568	/* Read last calculated ECC value */
    569	if (!read)
    570		udelay(10);
    571	host->ecc_buf[chip->ecc.steps - 1] =
    572		readl(SLC_ECC(host->io_base));
    573
    574	/* Flush DMA */
    575	dmaengine_terminate_all(host->dma_chan);
    576
    577	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
    578	    readl(SLC_TC(host->io_base))) {
    579		/* Something is left in the FIFO, something is wrong */
    580		dev_err(mtd->dev.parent, "DMA FIFO failure\n");
    581		status = -EIO;
    582	}
    583
    584	/* Stop DMA & HW ECC */
    585	writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
    586	       SLC_CTRL(host->io_base));
    587	writel(readl(SLC_CFG(host->io_base)) &
    588	       ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
    589		 SLCCFG_DMA_BURST), SLC_CFG(host->io_base));
    590
    591	if (!dma_mapped && read)
    592		memcpy(buf, host->data_buf, mtd->writesize);
    593
    594	return status;
    595}
    596
    597/*
    598 * Read the data and OOB data from the device, use ECC correction with the
    599 * data, disable ECC for the OOB data
    600 */
    601static int lpc32xx_nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
    602					   int oob_required, int page)
    603{
    604	struct mtd_info *mtd = nand_to_mtd(chip);
    605	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
    606	struct mtd_oob_region oobregion = { };
    607	int stat, i, status, error;
    608	uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
    609
    610	/* Issue read command */
    611	nand_read_page_op(chip, page, 0, NULL, 0);
    612
    613	/* Read data and oob, calculate ECC */
    614	status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
    615
    616	/* Get OOB data */
    617	chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
    618
    619	/* Convert to stored ECC format */
    620	lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);
    621
    622	/* Pointer to ECC data retrieved from NAND spare area */
    623	error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
    624	if (error)
    625		return error;
    626
    627	oobecc = chip->oob_poi + oobregion.offset;
    628
    629	for (i = 0; i < chip->ecc.steps; i++) {
    630		stat = chip->ecc.correct(chip, buf, oobecc,
    631					 &tmpecc[i * chip->ecc.bytes]);
    632		if (stat < 0)
    633			mtd->ecc_stats.failed++;
    634		else
    635			mtd->ecc_stats.corrected += stat;
    636
    637		buf += chip->ecc.size;
    638		oobecc += chip->ecc.bytes;
    639	}
    640
    641	return status;
    642}
    643
    644/*
    645 * Read the data and OOB data from the device, no ECC correction with the
    646 * data or OOB data
    647 */
    648static int lpc32xx_nand_read_page_raw_syndrome(struct nand_chip *chip,
    649					       uint8_t *buf, int oob_required,
    650					       int page)
    651{
    652	struct mtd_info *mtd = nand_to_mtd(chip);
    653
    654	/* Issue read command */
    655	nand_read_page_op(chip, page, 0, NULL, 0);
    656
    657	/* Raw reads can just use the FIFO interface */
    658	chip->legacy.read_buf(chip, buf, chip->ecc.size * chip->ecc.steps);
    659	chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
    660
    661	return 0;
    662}
    663
    664/*
    665 * Write the data and OOB data to the device, use ECC with the data,
    666 * disable ECC for the OOB data
    667 */
    668static int lpc32xx_nand_write_page_syndrome(struct nand_chip *chip,
    669					    const uint8_t *buf,
    670					    int oob_required, int page)
    671{
    672	struct mtd_info *mtd = nand_to_mtd(chip);
    673	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
    674	struct mtd_oob_region oobregion = { };
    675	uint8_t *pb;
    676	int error;
    677
    678	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
    679
    680	/* Write data, calculate ECC on outbound data */
    681	error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
    682	if (error)
    683		return error;
    684
    685	/*
    686	 * The calculated ECC needs some manual work done to it before
    687	 * committing it to NAND. Process the calculated ECC and place
    688	 * the resultant values directly into the OOB buffer. */
    689	error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
    690	if (error)
    691		return error;
    692
    693	pb = chip->oob_poi + oobregion.offset;
    694	lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);
    695
    696	/* Write ECC data to device */
    697	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
    698
    699	return nand_prog_page_end_op(chip);
    700}
    701
    702/*
    703 * Write the data and OOB data to the device, no ECC correction with the
    704 * data or OOB data
    705 */
    706static int lpc32xx_nand_write_page_raw_syndrome(struct nand_chip *chip,
    707						const uint8_t *buf,
    708						int oob_required, int page)
    709{
    710	struct mtd_info *mtd = nand_to_mtd(chip);
    711
    712	/* Raw writes can just use the FIFO interface */
    713	nand_prog_page_begin_op(chip, page, 0, buf,
    714				chip->ecc.size * chip->ecc.steps);
    715	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
    716
    717	return nand_prog_page_end_op(chip);
    718}
    719
    720static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
    721{
    722	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
    723	dma_cap_mask_t mask;
    724
    725	if (!host->pdata || !host->pdata->dma_filter) {
    726		dev_err(mtd->dev.parent, "no DMA platform data\n");
    727		return -ENOENT;
    728	}
    729
    730	dma_cap_zero(mask);
    731	dma_cap_set(DMA_SLAVE, mask);
    732	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
    733					     "nand-slc");
    734	if (!host->dma_chan) {
    735		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
    736		return -EBUSY;
    737	}
    738
    739	return 0;
    740}
    741
    742static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
    743{
    744	struct lpc32xx_nand_cfg_slc *ncfg;
    745	struct device_node *np = dev->of_node;
    746
    747	ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
    748	if (!ncfg)
    749		return NULL;
    750
    751	of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
    752	of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
    753	of_property_read_u32(np, "nxp,whold", &ncfg->whold);
    754	of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
    755	of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
    756	of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
    757	of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
    758	of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);
    759
    760	if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
    761	    !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
    762	    !ncfg->rhold || !ncfg->rsetup) {
    763		dev_err(dev, "chip parameters not specified correctly\n");
    764		return NULL;
    765	}
    766
    767	ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
    768
    769	return ncfg;
    770}
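/*
 * Illustrative device tree fragment (not part of the original driver; the
 * node name, unit address, timing values and GPIO specifier are only assumed
 * examples) showing the properties parsed above. The compatible string
 * matches the of_device_id table at the bottom of this file; the optional
 * "gpios" entry is the write-protect line.
 *
 *	slc: flash@20020000 {
 *		compatible = "nxp,lpc3220-slc";
 *		nxp,wdr-clks = <14>;
 *		nxp,wwidth = <40000000>;
 *		nxp,whold = <100000000>;
 *		nxp,wsetup = <100000000>;
 *		nxp,rdr-clks = <14>;
 *		nxp,rwidth = <40000000>;
 *		nxp,rhold = <66666666>;
 *		nxp,rsetup = <100000000>;
 *		gpios = <&gpio 5 19 1>;		(assumed WP GPIO specifier)
 *	};
 */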
    771
    772static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
    773{
    774	struct mtd_info *mtd = nand_to_mtd(chip);
    775	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
    776
    777	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
    778		return 0;
    779
    780	/* OOB and ECC CPU and DMA work areas */
    781	host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
    782
    783	/*
    784	 * Small page FLASH has a unique OOB layout, but large and huge
    785	 * page FLASH use the standard layout. Small page FLASH uses a
    786	 * custom BBT marker layout.
    787	 */
    788	if (mtd->writesize <= 512)
    789		mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
    790
    791	chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
    792	/* These sizes remain the same regardless of page size */
    793	chip->ecc.size = 256;
    794	chip->ecc.strength = 1;
    795	chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
    796	chip->ecc.prepad = 0;
    797	chip->ecc.postpad = 0;
    798	chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
    799	chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
    800	chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
    801	chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
    802	chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
    803	chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
    804	chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
    805	chip->ecc.correct = rawnand_sw_hamming_correct;
    806	chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
    807
    808	/*
    809	 * Use a custom BBT marker setup for small page FLASH that
    810	 * won't interfere with the ECC layout. Large and huge page
    811	 * FLASH use the standard layout.
    812	 */
    813	if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
    814	    mtd->writesize <= 512) {
    815		chip->bbt_td = &bbt_smallpage_main_descr;
    816		chip->bbt_md = &bbt_smallpage_mirror_descr;
    817	}
    818
    819	return 0;
    820}
    821
    822static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
    823	.attach_chip = lpc32xx_nand_attach_chip,
    824};
    825
    826/*
    827 * Probe for NAND controller
    828 */
    829static int lpc32xx_nand_probe(struct platform_device *pdev)
    830{
    831	struct lpc32xx_nand_host *host;
    832	struct mtd_info *mtd;
    833	struct nand_chip *chip;
    834	struct resource *rc;
    835	int res;
    836
    837	/* Allocate memory for the device structure (and zero it) */
    838	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
    839	if (!host)
    840		return -ENOMEM;
    841
    842	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    843	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
    844	if (IS_ERR(host->io_base))
    845		return PTR_ERR(host->io_base);
    846
    847	host->io_base_dma = rc->start;
    848	if (pdev->dev.of_node)
    849		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
    850	if (!host->ncfg) {
    851		dev_err(&pdev->dev,
    852			"Missing or bad NAND config from device tree\n");
    853		return -ENOENT;
    854	}
    855	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
    856		return -EPROBE_DEFER;
    857	if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev,
    858			host->ncfg->wp_gpio, "NAND WP")) {
    859		dev_err(&pdev->dev, "GPIO not available\n");
    860		return -EBUSY;
    861	}
    862	lpc32xx_wp_disable(host);
    863
    864	host->pdata = dev_get_platdata(&pdev->dev);
    865
    866	chip = &host->nand_chip;
    867	mtd = nand_to_mtd(chip);
    868	nand_set_controller_data(chip, host);
    869	nand_set_flash_node(chip, pdev->dev.of_node);
    870	mtd->owner = THIS_MODULE;
    871	mtd->dev.parent = &pdev->dev;
    872
    873	/* Get NAND clock */
    874	host->clk = devm_clk_get(&pdev->dev, NULL);
    875	if (IS_ERR(host->clk)) {
    876		dev_err(&pdev->dev, "Clock failure\n");
    877		res = -ENOENT;
    878		goto enable_wp;
    879	}
    880	res = clk_prepare_enable(host->clk);
    881	if (res)
    882		goto enable_wp;
    883
    884	/* Set NAND IO addresses and command/ready functions */
    885	chip->legacy.IO_ADDR_R = SLC_DATA(host->io_base);
    886	chip->legacy.IO_ADDR_W = SLC_DATA(host->io_base);
    887	chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
    888	chip->legacy.dev_ready = lpc32xx_nand_device_ready;
    889	chip->legacy.chip_delay = 20; /* 20us command delay time */
    890
    891	/* Init NAND controller */
    892	lpc32xx_nand_setup(host);
    893
    894	platform_set_drvdata(pdev, host);
    895
    896	/* NAND callbacks for LPC32xx SLC hardware */
    897	chip->legacy.read_byte = lpc32xx_nand_read_byte;
    898	chip->legacy.read_buf = lpc32xx_nand_read_buf;
    899	chip->legacy.write_buf = lpc32xx_nand_write_buf;
    900
    901	/*
    902	 * Allocate a large enough buffer for a single huge page plus
    903	 * extra space for the spare area and ECC storage area
    904	 */
    905	host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
    906	host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
    907				      GFP_KERNEL);
    908	if (host->data_buf == NULL) {
    909		res = -ENOMEM;
    910		goto unprepare_clk;
    911	}
    912
    913	res = lpc32xx_nand_dma_setup(host);
    914	if (res) {
    915		res = -EIO;
    916		goto unprepare_clk;
    917	}
    918
    919	/* Find NAND device */
    920	chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
    921	res = nand_scan(chip, 1);
    922	if (res)
    923		goto release_dma;
    924
    925	mtd->name = "nxp_lpc3220_slc";
    926	res = mtd_device_register(mtd, host->ncfg->parts,
    927				  host->ncfg->num_parts);
    928	if (res)
    929		goto cleanup_nand;
    930
    931	return 0;
    932
    933cleanup_nand:
    934	nand_cleanup(chip);
    935release_dma:
    936	dma_release_channel(host->dma_chan);
    937unprepare_clk:
    938	clk_disable_unprepare(host->clk);
    939enable_wp:
    940	lpc32xx_wp_enable(host);
    941
    942	return res;
    943}
    944
    945/*
    946 * Remove NAND device.
    947 */
    948static int lpc32xx_nand_remove(struct platform_device *pdev)
    949{
    950	uint32_t tmp;
    951	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
    952	struct nand_chip *chip = &host->nand_chip;
    953	int ret;
    954
    955	ret = mtd_device_unregister(nand_to_mtd(chip));
    956	WARN_ON(ret);
    957	nand_cleanup(chip);
    958	dma_release_channel(host->dma_chan);
    959
    960	/* Force CE high */
    961	tmp = readl(SLC_CTRL(host->io_base));
    962	tmp &= ~SLCCFG_CE_LOW;
    963	writel(tmp, SLC_CTRL(host->io_base));
    964
    965	clk_disable_unprepare(host->clk);
    966	lpc32xx_wp_enable(host);
    967
    968	return 0;
    969}
    970
    971#ifdef CONFIG_PM
    972static int lpc32xx_nand_resume(struct platform_device *pdev)
    973{
    974	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
    975	int ret;
    976
    977	/* Re-enable NAND clock */
    978	ret = clk_prepare_enable(host->clk);
    979	if (ret)
    980		return ret;
    981
    982	/* Fresh init of NAND controller */
    983	lpc32xx_nand_setup(host);
    984
    985	/* Disable write protect */
    986	lpc32xx_wp_disable(host);
    987
    988	return 0;
    989}
    990
    991static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
    992{
    993	uint32_t tmp;
    994	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
    995
    996	/* Force CE high */
    997	tmp = readl(SLC_CTRL(host->io_base));
    998	tmp &= ~SLCCFG_CE_LOW;
    999	writel(tmp, SLC_CTRL(host->io_base));
   1000
   1001	/* Enable write protect for safety */
   1002	lpc32xx_wp_enable(host);
   1003
   1004	/* Disable clock */
   1005	clk_disable_unprepare(host->clk);
   1006
   1007	return 0;
   1008}
   1009
   1010#else
   1011#define lpc32xx_nand_resume NULL
   1012#define lpc32xx_nand_suspend NULL
   1013#endif
   1014
   1015static const struct of_device_id lpc32xx_nand_match[] = {
   1016	{ .compatible = "nxp,lpc3220-slc" },
   1017	{ /* sentinel */ },
   1018};
   1019MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
   1020
   1021static struct platform_driver lpc32xx_nand_driver = {
   1022	.probe		= lpc32xx_nand_probe,
   1023	.remove		= lpc32xx_nand_remove,
   1024	.resume		= lpc32xx_nand_resume,
   1025	.suspend	= lpc32xx_nand_suspend,
   1026	.driver		= {
   1027		.name	= LPC32XX_MODNAME,
   1028		.of_match_table = lpc32xx_nand_match,
   1029	},
   1030};
   1031
   1032module_platform_driver(lpc32xx_nand_driver);
   1033
   1034MODULE_LICENSE("GPL");
   1035MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
   1036MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
   1037MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");