cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

davinci_nand.c (24002B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * davinci_nand.c - NAND Flash Driver for DaVinci family chips
      4 *
      5 * Copyright © 2006 Texas Instruments.
      6 *
      7 * Port to 2.6.23 Copyright © 2008 by:
      8 *   Sander Huijsen <Shuijsen@optelecom-nkf.com>
      9 *   Troy Kisky <troy.kisky@boundarydevices.com>
     10 *   Dirk Behme <Dirk.Behme@gmail.com>
     11 */
     12
     13#include <linux/kernel.h>
     14#include <linux/module.h>
     15#include <linux/platform_device.h>
     16#include <linux/err.h>
     17#include <linux/iopoll.h>
     18#include <linux/mtd/rawnand.h>
     19#include <linux/mtd/partitions.h>
     20#include <linux/slab.h>
     21#include <linux/of_device.h>
     22#include <linux/of.h>
     23
     24#include <linux/platform_data/mtd-davinci.h>
     25#include <linux/platform_data/mtd-davinci-aemif.h>
     26
     27/*
     28 * This is a device driver for the NAND flash controller found on the
     29 * various DaVinci family chips.  It handles up to four SoC chipselects,
     30 * and some flavors of secondary chipselect (e.g. based on A12) as used
     31 * with multichip packages.
     32 *
     33 * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
     34 * available on chips like the DM355 and OMAP-L137 and needed with the
     35 * more error-prone MLC NAND chips.
     36 *
     37 * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
     38 * outputs in a "wire-AND" configuration, with no per-chip signals.
     39 */
     40struct davinci_nand_info {
     41	struct nand_controller	controller;
     42	struct nand_chip	chip;
     43
     44	struct platform_device	*pdev;
     45
     46	bool			is_readmode;
     47
     48	void __iomem		*base;
     49	void __iomem		*vaddr;
     50
     51	void __iomem		*current_cs;
     52
     53	uint32_t		mask_chipsel;
     54	uint32_t		mask_ale;
     55	uint32_t		mask_cle;
     56
     57	uint32_t		core_chipsel;
     58
     59	struct davinci_aemif_timing	*timing;
     60};
     61
     62static DEFINE_SPINLOCK(davinci_nand_lock);
     63static bool ecc4_busy;
     64
     65static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
     66{
     67	return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
     68}
     69
     70static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
     71		int offset)
     72{
     73	return __raw_readl(info->base + offset);
     74}
     75
     76static inline void davinci_nand_writel(struct davinci_nand_info *info,
     77		int offset, unsigned long value)
     78{
     79	__raw_writel(value, info->base + offset);
     80}
     81
     82/*----------------------------------------------------------------------*/
     83
     84/*
     85 * 1-bit hardware ECC ... context maintained for each core chipselect
     86 */
     87
     88static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
     89{
     90	struct davinci_nand_info *info = to_davinci_nand(mtd);
     91
     92	return davinci_nand_readl(info, NANDF1ECC_OFFSET
     93			+ 4 * info->core_chipsel);
     94}
     95
     96static void nand_davinci_hwctl_1bit(struct nand_chip *chip, int mode)
     97{
     98	struct davinci_nand_info *info;
     99	uint32_t nandcfr;
    100	unsigned long flags;
    101
    102	info = to_davinci_nand(nand_to_mtd(chip));
    103
    104	/* Reset ECC hardware */
    105	nand_davinci_readecc_1bit(nand_to_mtd(chip));
    106
    107	spin_lock_irqsave(&davinci_nand_lock, flags);
    108
    109	/* Restart ECC hardware */
    110	nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
    111	nandcfr |= BIT(8 + info->core_chipsel);
    112	davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);
    113
    114	spin_unlock_irqrestore(&davinci_nand_lock, flags);
    115}
    116
    117/*
    118 * Read hardware ECC value and pack into three bytes
    119 */
    120static int nand_davinci_calculate_1bit(struct nand_chip *chip,
    121				       const u_char *dat, u_char *ecc_code)
    122{
    123	unsigned int ecc_val = nand_davinci_readecc_1bit(nand_to_mtd(chip));
    124	unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);
    125
    126	/* invert so that erased block ecc is correct */
    127	ecc24 = ~ecc24;
    128	ecc_code[0] = (u_char)(ecc24);
    129	ecc_code[1] = (u_char)(ecc24 >> 8);
    130	ecc_code[2] = (u_char)(ecc24 >> 16);
    131
    132	return 0;
    133}
    134
    135static int nand_davinci_correct_1bit(struct nand_chip *chip, u_char *dat,
    136				     u_char *read_ecc, u_char *calc_ecc)
    137{
    138	uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
    139					  (read_ecc[2] << 16);
    140	uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
    141					  (calc_ecc[2] << 16);
    142	uint32_t diff = eccCalc ^ eccNand;
    143
    144	if (diff) {
    145		if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
    146			/* Correctable error */
    147			if ((diff >> (12 + 3)) < chip->ecc.size) {
    148				dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
    149				return 1;
    150			} else {
    151				return -EBADMSG;
    152			}
    153		} else if (!(diff & (diff - 1))) {
    154			/* Single bit ECC error in the ECC itself,
    155			 * nothing to fix */
    156			return 1;
    157		} else {
    158			/* Uncorrectable error */
    159			return -EBADMSG;
    160		}
    161
    162	}
    163	return 0;
    164}
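
/*
 * For illustration (names below are illustrative only): with the 512-byte
 * ecc.size set up in davinci_nand_attach_chip(), a correctable "diff"
 * encodes the error position as
 *
 *	byte_offset = diff >> (12 + 3);		bit_in_byte = (diff >> 12) & 7;
 *	dat[byte_offset] ^= BIT(bit_in_byte);
 *
 * which is exactly the correction applied in the branch above.
 */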
    165
    166/*----------------------------------------------------------------------*/
    167
    168/*
    169 * 4-bit hardware ECC ... context maintained over entire AEMIF
    170 *
    171 * This is a syndrome engine, but we avoid NAND_ECC_PLACEMENT_INTERLEAVED
    172 * since that forces use of a problematic "infix OOB" layout.
    173 * Among other things, it trashes manufacturer bad block markers.
    174 * Also, and specific to this hardware, it ECC-protects the "prepad"
    175 * in the OOB ... while having ECC protection for parts of OOB would
    176 * seem useful, the current MTD stack sometimes wants to update the
    177 * OOB without recomputing ECC.
    178 */
    179
    180static void nand_davinci_hwctl_4bit(struct nand_chip *chip, int mode)
    181{
    182	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
    183	unsigned long flags;
    184	u32 val;
    185
    186	/* Reset ECC hardware */
    187	davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
    188
    189	spin_lock_irqsave(&davinci_nand_lock, flags);
    190
    191	/* Start 4-bit ECC calculation for read/write */
    192	val = davinci_nand_readl(info, NANDFCR_OFFSET);
    193	val &= ~(0x03 << 4);
    194	val |= (info->core_chipsel << 4) | BIT(12);
    195	davinci_nand_writel(info, NANDFCR_OFFSET, val);
    196
    197	info->is_readmode = (mode == NAND_ECC_READ);
    198
    199	spin_unlock_irqrestore(&davinci_nand_lock, flags);
    200}
    201
    202/* Read raw ECC code after writing to NAND. */
    203static void
    204nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
    205{
    206	const u32 mask = 0x03ff03ff;
    207
    208	code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
    209	code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
    210	code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
    211	code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
    212}
    213
    214/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
    215static int nand_davinci_calculate_4bit(struct nand_chip *chip,
    216				       const u_char *dat, u_char *ecc_code)
    217{
    218	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
    219	u32 raw_ecc[4], *p;
    220	unsigned i;
    221
    222	/* After a read, terminate ECC calculation by a dummy read
    223	 * of some 4-bit ECC register.  ECC covers everything that
    224	 * was read; correct() just uses the hardware state, so
    225	 * ecc_code is not needed.
    226	 */
    227	if (info->is_readmode) {
    228		davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
    229		return 0;
    230	}
    231
    232	/* Pack eight raw 10-bit ecc values into ten bytes, making
    233	 * two passes which each convert four values (in upper and
    234	 * lower halves of two 32-bit words) into five bytes.  The
    235	 * ROM boot loader uses this same packing scheme.
    236	 */
    237	nand_davinci_readecc_4bit(info, raw_ecc);
    238	for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
    239		*ecc_code++ =   p[0]        & 0xff;
    240		*ecc_code++ = ((p[0] >>  8) & 0x03) | ((p[0] >> 14) & 0xfc);
    241		*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] <<  4) & 0xf0);
    242		*ecc_code++ = ((p[1] >>  4) & 0x3f) | ((p[1] >> 10) & 0xc0);
    243		*ecc_code++ =  (p[1] >> 18) & 0xff;
    244	}
    245
    246	return 0;
    247}
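
/*
 * For reference: each 4BITECC register holds two 10-bit parity values, at
 * bits 9:0 and 25:16 (hence the 0x03ff03ff mask in
 * nand_davinci_readecc_4bit()).  The unrolled loop above is a little-endian
 * bit-stream copy: for each pair of registers it concatenates the four
 * 10-bit values into one 40-bit word (first value in the least significant
 * bits) and emits that word LSB-first as five bytes, so all eight values
 * land in ten contiguous ECC bytes.
 */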
    248
    249/* Correct up to 4 bits in data we just read, using state left in the
    250 * hardware plus the ecc_code computed when it was first written.
    251 */
    252static int nand_davinci_correct_4bit(struct nand_chip *chip, u_char *data,
    253				     u_char *ecc_code, u_char *null)
    254{
    255	int i;
    256	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
    257	unsigned short ecc10[8];
    258	unsigned short *ecc16;
    259	u32 syndrome[4];
    260	u32 ecc_state;
    261	unsigned num_errors, corrected;
    262	unsigned long timeo;
    263
    264	/* Unpack ten bytes into eight 10 bit values.  We know we're
    265	 * little-endian, and use type punning for less shifting/masking.
    266	 */
    267	if (WARN_ON(0x01 & (uintptr_t)ecc_code))
    268		return -EINVAL;
    269	ecc16 = (unsigned short *)ecc_code;
    270
    271	ecc10[0] =  (ecc16[0] >>  0) & 0x3ff;
    272	ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
    273	ecc10[2] =  (ecc16[1] >>  4) & 0x3ff;
    274	ecc10[3] = ((ecc16[1] >> 14) & 0x3)  | ((ecc16[2] << 2) & 0x3fc);
    275	ecc10[4] =  (ecc16[2] >>  8)         | ((ecc16[3] << 8) & 0x300);
    276	ecc10[5] =  (ecc16[3] >>  2) & 0x3ff;
    277	ecc10[6] = ((ecc16[3] >> 12) & 0xf)  | ((ecc16[4] << 4) & 0x3f0);
    278	ecc10[7] =  (ecc16[4] >>  6) & 0x3ff;
    279
    280	/* Tell ECC controller about the expected ECC codes. */
    281	for (i = 7; i >= 0; i--)
    282		davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
    283
    284	/* Allow time for syndrome calculation ... then read it.
     285	 * A syndrome of all zeroes means no detected errors.
    286	 */
    287	davinci_nand_readl(info, NANDFSR_OFFSET);
    288	nand_davinci_readecc_4bit(info, syndrome);
    289	if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
    290		return 0;
    291
    292	/*
    293	 * Clear any previous address calculation by doing a dummy read of an
    294	 * error address register.
    295	 */
    296	davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);
    297
    298	/* Start address calculation, and wait for it to complete.
    299	 * We _could_ start reading more data while this is working,
    300	 * to speed up the overall page read.
    301	 */
    302	davinci_nand_writel(info, NANDFCR_OFFSET,
    303			davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
    304
    305	/*
    306	 * ECC_STATE field reads 0x3 (Error correction complete) immediately
    307	 * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
    308	 * begin trying to poll for the state, you may fall right out of your
    309	 * loop without any of the correction calculations having taken place.
    310	 * The recommendation from the hardware team is to initially delay as
    311	 * long as ECC_STATE reads less than 4. After that, ECC HW has entered
    312	 * correction state.
    313	 */
    314	timeo = jiffies + usecs_to_jiffies(100);
    315	do {
    316		ecc_state = (davinci_nand_readl(info,
    317				NANDFSR_OFFSET) >> 8) & 0x0f;
    318		cpu_relax();
    319	} while ((ecc_state < 4) && time_before(jiffies, timeo));
    320
    321	for (;;) {
    322		u32	fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
    323
    324		switch ((fsr >> 8) & 0x0f) {
    325		case 0:		/* no error, should not happen */
    326			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
    327			return 0;
    328		case 1:		/* five or more errors detected */
    329			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
    330			return -EBADMSG;
    331		case 2:		/* error addresses computed */
    332		case 3:
    333			num_errors = 1 + ((fsr >> 16) & 0x03);
    334			goto correct;
    335		default:	/* still working on it */
    336			cpu_relax();
    337			continue;
    338		}
    339	}
    340
    341correct:
    342	/* correct each error */
    343	for (i = 0, corrected = 0; i < num_errors; i++) {
    344		int error_address, error_value;
    345
    346		if (i > 1) {
    347			error_address = davinci_nand_readl(info,
    348						NAND_ERR_ADD2_OFFSET);
    349			error_value = davinci_nand_readl(info,
    350						NAND_ERR_ERRVAL2_OFFSET);
    351		} else {
    352			error_address = davinci_nand_readl(info,
    353						NAND_ERR_ADD1_OFFSET);
    354			error_value = davinci_nand_readl(info,
    355						NAND_ERR_ERRVAL1_OFFSET);
    356		}
    357
    358		if (i & 1) {
    359			error_address >>= 16;
    360			error_value >>= 16;
    361		}
    362		error_address &= 0x3ff;
    363		error_address = (512 + 7) - error_address;
    364
    365		if (error_address < 512) {
    366			data[error_address] ^= error_value;
    367			corrected++;
    368		}
    369	}
    370
    371	return corrected;
    372}
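
/*
 * Note on the address math above: each hardware-reported position is mapped
 * through (512 + 7) - error_address; anything landing at or beyond offset
 * 512 lies outside the 512-byte data area (i.e. in the ECC bytes) and is
 * skipped, so the returned count covers only corrections actually applied
 * to the data buffer.
 */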
    373
    374/*----------------------------------------------------------------------*/
    375
    376/* An ECC layout for using 4-bit ECC with small-page flash, storing
     377 * ten ECC bytes plus the manufacturer's bad block marker byte, and
     378 * not overlapping the default BBT markers.
    379 */
    380static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
    381				      struct mtd_oob_region *oobregion)
    382{
    383	if (section > 2)
    384		return -ERANGE;
    385
    386	if (!section) {
    387		oobregion->offset = 0;
    388		oobregion->length = 5;
    389	} else if (section == 1) {
    390		oobregion->offset = 6;
    391		oobregion->length = 2;
    392	} else {
    393		oobregion->offset = 13;
    394		oobregion->length = 3;
    395	}
    396
    397	return 0;
    398}
    399
    400static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
    401				       struct mtd_oob_region *oobregion)
    402{
    403	if (section > 1)
    404		return -ERANGE;
    405
    406	if (!section) {
    407		oobregion->offset = 8;
    408		oobregion->length = 5;
    409	} else {
    410		oobregion->offset = 16;
    411		oobregion->length = mtd->oobsize - 16;
    412	}
    413
    414	return 0;
    415}
    416
    417static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
    418	.ecc = hwecc4_ooblayout_small_ecc,
    419	.free = hwecc4_ooblayout_small_free,
    420};
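
/*
 * For a 16-byte OOB the two callbacks above give the following layout,
 * matching the comment about preserving the manufacturer's marker:
 *
 *	bytes  0-4	ECC (section 0)
 *	byte   5	manufacturer bad block marker, left untouched
 *	bytes  6-7	ECC (section 1)
 *	bytes  8-12	free (room for flash BBT markers)
 *	bytes 13-15	ECC (section 2)
 */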
    421
    422#if defined(CONFIG_OF)
    423static const struct of_device_id davinci_nand_of_match[] = {
    424	{.compatible = "ti,davinci-nand", },
    425	{.compatible = "ti,keystone-nand", },
    426	{},
    427};
    428MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
    429
    430static struct davinci_nand_pdata
    431	*nand_davinci_get_pdata(struct platform_device *pdev)
    432{
    433	if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
    434		struct davinci_nand_pdata *pdata;
    435		const char *mode;
    436		u32 prop;
    437
     438		pdata = devm_kzalloc(&pdev->dev,
    439				sizeof(struct davinci_nand_pdata),
    440				GFP_KERNEL);
    441		pdev->dev.platform_data = pdata;
    442		if (!pdata)
    443			return ERR_PTR(-ENOMEM);
    444		if (!of_property_read_u32(pdev->dev.of_node,
    445			"ti,davinci-chipselect", &prop))
    446			pdata->core_chipsel = prop;
    447		else
    448			return ERR_PTR(-EINVAL);
    449
    450		if (!of_property_read_u32(pdev->dev.of_node,
    451			"ti,davinci-mask-ale", &prop))
    452			pdata->mask_ale = prop;
    453		if (!of_property_read_u32(pdev->dev.of_node,
    454			"ti,davinci-mask-cle", &prop))
    455			pdata->mask_cle = prop;
    456		if (!of_property_read_u32(pdev->dev.of_node,
    457			"ti,davinci-mask-chipsel", &prop))
    458			pdata->mask_chipsel = prop;
    459		if (!of_property_read_string(pdev->dev.of_node,
    460			"ti,davinci-ecc-mode", &mode)) {
    461			if (!strncmp("none", mode, 4))
    462				pdata->engine_type = NAND_ECC_ENGINE_TYPE_NONE;
    463			if (!strncmp("soft", mode, 4))
    464				pdata->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
    465			if (!strncmp("hw", mode, 2))
    466				pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
    467		}
    468		if (!of_property_read_u32(pdev->dev.of_node,
    469			"ti,davinci-ecc-bits", &prop))
    470			pdata->ecc_bits = prop;
    471
    472		if (!of_property_read_u32(pdev->dev.of_node,
    473			"ti,davinci-nand-buswidth", &prop) && prop == 16)
    474			pdata->options |= NAND_BUSWIDTH_16;
    475
    476		if (of_property_read_bool(pdev->dev.of_node,
    477			"ti,davinci-nand-use-bbt"))
    478			pdata->bbt_options = NAND_BBT_USE_FLASH;
    479
    480		/*
    481		 * Since kernel v4.8, this driver has been fixed to enable
    482		 * use of 4-bit hardware ECC with subpages and verified on
    483		 * TI's keystone EVMs (K2L, K2HK and K2E).
    484		 * However, in the interest of not breaking systems using
    485		 * existing UBI partitions, sub-page writes are not being
    486		 * (re)enabled. If you want to use subpage writes on Keystone
    487		 * platforms (i.e. do not have any existing UBI partitions),
    488		 * then use "ti,davinci-nand" as the compatible in your
    489		 * device-tree file.
    490		 */
    491		if (of_device_is_compatible(pdev->dev.of_node,
    492					    "ti,keystone-nand")) {
    493			pdata->options |= NAND_NO_SUBPAGE_WRITE;
    494		}
    495	}
    496
    497	return dev_get_platdata(&pdev->dev);
    498}
    499#else
    500static struct davinci_nand_pdata
    501	*nand_davinci_get_pdata(struct platform_device *pdev)
    502{
    503	return dev_get_platdata(&pdev->dev);
    504}
    505#endif
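
/*
 * A hypothetical device-tree node exercising the properties parsed above;
 * the node name, unit address and values are placeholders only, and the
 * usual reg/AEMIF plumbing is omitted:
 *
 *	nand@2000000 {
 *		compatible = "ti,davinci-nand";
 *		ti,davinci-chipselect = <1>;
 *		ti,davinci-ecc-mode = "hw";
 *		ti,davinci-ecc-bits = <4>;
 *		ti,davinci-nand-use-bbt;
 *	};
 *
 * "ti,davinci-ecc-mode" accepts "none", "soft" or "hw"; the optional
 * ti,davinci-mask-ale/-cle/-chipsel and ti,davinci-nand-buswidth (= 16)
 * properties are read the same way.
 */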
    506
    507static int davinci_nand_attach_chip(struct nand_chip *chip)
    508{
    509	struct mtd_info *mtd = nand_to_mtd(chip);
    510	struct davinci_nand_info *info = to_davinci_nand(mtd);
    511	struct davinci_nand_pdata *pdata = nand_davinci_get_pdata(info->pdev);
    512	int ret = 0;
    513
    514	if (IS_ERR(pdata))
    515		return PTR_ERR(pdata);
    516
    517	/* Use board-specific ECC config */
    518	chip->ecc.engine_type = pdata->engine_type;
    519	chip->ecc.placement = pdata->ecc_placement;
    520
    521	switch (chip->ecc.engine_type) {
    522	case NAND_ECC_ENGINE_TYPE_NONE:
    523		pdata->ecc_bits = 0;
    524		break;
    525	case NAND_ECC_ENGINE_TYPE_SOFT:
    526		pdata->ecc_bits = 0;
    527		/*
    528		 * This driver expects Hamming based ECC when engine_type is set
    529		 * to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to
    530		 * NAND_ECC_ALGO_HAMMING to avoid adding an extra ->ecc_algo
    531		 * field to davinci_nand_pdata.
    532		 */
    533		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
    534		break;
    535	case NAND_ECC_ENGINE_TYPE_ON_HOST:
    536		if (pdata->ecc_bits == 4) {
    537			int chunks = mtd->writesize / 512;
    538
    539			if (!chunks || mtd->oobsize < 16) {
    540				dev_dbg(&info->pdev->dev, "too small\n");
    541				return -EINVAL;
    542			}
    543
    544			/*
    545			 * No sanity checks:  CPUs must support this,
    546			 * and the chips may not use NAND_BUSWIDTH_16.
    547			 */
    548
    549			/* No sharing 4-bit hardware between chipselects yet */
    550			spin_lock_irq(&davinci_nand_lock);
    551			if (ecc4_busy)
    552				ret = -EBUSY;
    553			else
    554				ecc4_busy = true;
    555			spin_unlock_irq(&davinci_nand_lock);
    556
    557			if (ret == -EBUSY)
    558				return ret;
    559
    560			chip->ecc.calculate = nand_davinci_calculate_4bit;
    561			chip->ecc.correct = nand_davinci_correct_4bit;
    562			chip->ecc.hwctl = nand_davinci_hwctl_4bit;
    563			chip->ecc.bytes = 10;
    564			chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
    565			chip->ecc.algo = NAND_ECC_ALGO_BCH;
    566
    567			/*
    568			 * Update ECC layout if needed ... for 1-bit HW ECC, the
    569			 * default is OK, but it allocates 6 bytes when only 3
    570			 * are needed (for each 512 bytes). For 4-bit HW ECC,
    571			 * the default is not usable: 10 bytes needed, not 6.
    572			 *
    573			 * For small page chips, preserve the manufacturer's
    574			 * badblock marking data ... and make sure a flash BBT
    575			 * table marker fits in the free bytes.
    576			 */
    577			if (chunks == 1) {
    578				mtd_set_ooblayout(mtd,
    579						  &hwecc4_small_ooblayout_ops);
    580			} else if (chunks == 4 || chunks == 8) {
    581				mtd_set_ooblayout(mtd,
    582						  nand_get_large_page_ooblayout());
    583				chip->ecc.read_page = nand_read_page_hwecc_oob_first;
    584			} else {
    585				return -EIO;
    586			}
    587		} else {
    588			/* 1bit ecc hamming */
    589			chip->ecc.calculate = nand_davinci_calculate_1bit;
    590			chip->ecc.correct = nand_davinci_correct_1bit;
    591			chip->ecc.hwctl = nand_davinci_hwctl_1bit;
    592			chip->ecc.bytes = 3;
    593			chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
    594		}
    595		chip->ecc.size = 512;
    596		chip->ecc.strength = pdata->ecc_bits;
    597		break;
    598	default:
    599		return -EINVAL;
    600	}
    601
    602	return ret;
    603}
    604
    605static void nand_davinci_data_in(struct davinci_nand_info *info, void *buf,
    606				 unsigned int len, bool force_8bit)
    607{
    608	u32 alignment = ((uintptr_t)buf | len) & 3;
    609
    610	if (force_8bit || (alignment & 1))
    611		ioread8_rep(info->current_cs, buf, len);
    612	else if (alignment & 3)
    613		ioread16_rep(info->current_cs, buf, len >> 1);
    614	else
    615		ioread32_rep(info->current_cs, buf, len >> 2);
    616}
    617
    618static void nand_davinci_data_out(struct davinci_nand_info *info,
    619				  const void *buf, unsigned int len,
    620				  bool force_8bit)
    621{
    622	u32 alignment = ((uintptr_t)buf | len) & 3;
    623
    624	if (force_8bit || (alignment & 1))
    625		iowrite8_rep(info->current_cs, buf, len);
    626	else if (alignment & 3)
    627		iowrite16_rep(info->current_cs, buf, len >> 1);
    628	else
    629		iowrite32_rep(info->current_cs, buf, len >> 2);
    630}
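
/*
 * The helpers above pick the widest MMIO access that both the buffer
 * address and the length allow:
 *
 *	((uintptr_t)buf | len) & 3 == 0		32-bit, len / 4 transfers
 *	((uintptr_t)buf | len) & 3 == 2		16-bit, len / 2 transfers
 *	otherwise, or force_8bit		 8-bit, len transfers
 *
 * e.g. a word-aligned 2048-byte page read becomes one ioread32_rep() of
 * 512 words, while an odd-length OOB fragment falls back to byte accesses.
 */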
    631
    632static int davinci_nand_exec_instr(struct davinci_nand_info *info,
    633				   const struct nand_op_instr *instr)
    634{
    635	unsigned int i, timeout_us;
    636	u32 status;
    637	int ret;
    638
    639	switch (instr->type) {
    640	case NAND_OP_CMD_INSTR:
    641		iowrite8(instr->ctx.cmd.opcode,
    642			 info->current_cs + info->mask_cle);
    643		break;
    644
    645	case NAND_OP_ADDR_INSTR:
    646		for (i = 0; i < instr->ctx.addr.naddrs; i++) {
    647			iowrite8(instr->ctx.addr.addrs[i],
    648				 info->current_cs + info->mask_ale);
    649		}
    650		break;
    651
    652	case NAND_OP_DATA_IN_INSTR:
    653		nand_davinci_data_in(info, instr->ctx.data.buf.in,
    654				     instr->ctx.data.len,
    655				     instr->ctx.data.force_8bit);
    656		break;
    657
    658	case NAND_OP_DATA_OUT_INSTR:
    659		nand_davinci_data_out(info, instr->ctx.data.buf.out,
    660				      instr->ctx.data.len,
    661				      instr->ctx.data.force_8bit);
    662		break;
    663
    664	case NAND_OP_WAITRDY_INSTR:
    665		timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
    666		ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET,
    667						 status, status & BIT(0), 100,
    668						 timeout_us);
    669		if (ret)
    670			return ret;
    671
    672		break;
    673	}
    674
    675	if (instr->delay_ns)
    676		ndelay(instr->delay_ns);
    677
    678	return 0;
    679}
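
/*
 * Summary of the mapping above: command bytes are written at
 * current_cs + mask_cle (CLE asserted), address bytes at
 * current_cs + mask_ale (ALE asserted), and data cycles go through
 * current_cs itself via the data_in/data_out helpers; WAITRDY polls
 * bit 0 of NANDFSR (the ready/wait status) with the instruction's
 * timeout converted to microseconds.
 */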
    680
    681static int davinci_nand_exec_op(struct nand_chip *chip,
    682				const struct nand_operation *op,
    683				bool check_only)
    684{
    685	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
    686	unsigned int i;
    687
    688	if (check_only)
    689		return 0;
    690
    691	info->current_cs = info->vaddr + (op->cs * info->mask_chipsel);
    692
    693	for (i = 0; i < op->ninstrs; i++) {
    694		int ret;
    695
    696		ret = davinci_nand_exec_instr(info, &op->instrs[i]);
    697		if (ret)
    698			return ret;
    699	}
    700
    701	return 0;
    702}
    703
    704static const struct nand_controller_ops davinci_nand_controller_ops = {
    705	.attach_chip = davinci_nand_attach_chip,
    706	.exec_op = davinci_nand_exec_op,
    707};
    708
    709static int nand_davinci_probe(struct platform_device *pdev)
    710{
    711	struct davinci_nand_pdata	*pdata;
    712	struct davinci_nand_info	*info;
    713	struct resource			*res1;
    714	struct resource			*res2;
    715	void __iomem			*vaddr;
    716	void __iomem			*base;
    717	int				ret;
    718	uint32_t			val;
    719	struct mtd_info			*mtd;
    720
    721	pdata = nand_davinci_get_pdata(pdev);
    722	if (IS_ERR(pdata))
    723		return PTR_ERR(pdata);
    724
    725	/* insist on board-specific configuration */
    726	if (!pdata)
    727		return -ENODEV;
    728
    729	/* which external chipselect will we be managing? */
    730	if (pdata->core_chipsel > 3)
    731		return -ENODEV;
    732
    733	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
    734	if (!info)
    735		return -ENOMEM;
    736
    737	platform_set_drvdata(pdev, info);
    738
    739	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    740	res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
    741	if (!res1 || !res2) {
    742		dev_err(&pdev->dev, "resource missing\n");
    743		return -EINVAL;
    744	}
    745
    746	vaddr = devm_ioremap_resource(&pdev->dev, res1);
    747	if (IS_ERR(vaddr))
    748		return PTR_ERR(vaddr);
    749
    750	/*
     751	 * This register range is used to set up the NAND controller. When the
     752	 * TI AEMIF driver is in use, the same address range has already been
     753	 * requested by AEMIF, so we cannot request it twice; just ioremap it.
     754	 * The AEMIF and NAND drivers do not use the same registers in this range.
    755	 */
    756	base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
    757	if (!base) {
    758		dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
    759		return -EADDRNOTAVAIL;
    760	}
    761
    762	info->pdev		= pdev;
    763	info->base		= base;
    764	info->vaddr		= vaddr;
    765
    766	mtd			= nand_to_mtd(&info->chip);
    767	mtd->dev.parent		= &pdev->dev;
    768	nand_set_flash_node(&info->chip, pdev->dev.of_node);
    769
    770	/* options such as NAND_BBT_USE_FLASH */
    771	info->chip.bbt_options	= pdata->bbt_options;
    772	/* options such as 16-bit widths */
    773	info->chip.options	= pdata->options;
    774	info->chip.bbt_td	= pdata->bbt_td;
    775	info->chip.bbt_md	= pdata->bbt_md;
    776	info->timing		= pdata->timing;
    777
    778	info->current_cs	= info->vaddr;
    779	info->core_chipsel	= pdata->core_chipsel;
    780	info->mask_chipsel	= pdata->mask_chipsel;
    781
    782	/* use nandboot-capable ALE/CLE masks by default */
    783	info->mask_ale		= pdata->mask_ale ? : MASK_ALE;
    784	info->mask_cle		= pdata->mask_cle ? : MASK_CLE;
    785
    786	spin_lock_irq(&davinci_nand_lock);
    787
    788	/* put CSxNAND into NAND mode */
    789	val = davinci_nand_readl(info, NANDFCR_OFFSET);
    790	val |= BIT(info->core_chipsel);
    791	davinci_nand_writel(info, NANDFCR_OFFSET, val);
    792
    793	spin_unlock_irq(&davinci_nand_lock);
    794
    795	/* Scan to find existence of the device(s) */
    796	nand_controller_init(&info->controller);
    797	info->controller.ops = &davinci_nand_controller_ops;
    798	info->chip.controller = &info->controller;
    799	ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1);
    800	if (ret < 0) {
    801		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
    802		return ret;
    803	}
    804
    805	if (pdata->parts)
    806		ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
    807	else
    808		ret = mtd_device_register(mtd, NULL, 0);
    809	if (ret < 0)
    810		goto err_cleanup_nand;
    811
    812	val = davinci_nand_readl(info, NRCSR_OFFSET);
    813	dev_info(&pdev->dev, "controller rev. %d.%d\n",
    814	       (val >> 8) & 0xff, val & 0xff);
    815
    816	return 0;
    817
    818err_cleanup_nand:
    819	nand_cleanup(&info->chip);
    820
    821	return ret;
    822}
    823
    824static int nand_davinci_remove(struct platform_device *pdev)
    825{
    826	struct davinci_nand_info *info = platform_get_drvdata(pdev);
    827	struct nand_chip *chip = &info->chip;
    828	int ret;
    829
    830	spin_lock_irq(&davinci_nand_lock);
    831	if (chip->ecc.placement == NAND_ECC_PLACEMENT_INTERLEAVED)
    832		ecc4_busy = false;
    833	spin_unlock_irq(&davinci_nand_lock);
    834
    835	ret = mtd_device_unregister(nand_to_mtd(chip));
    836	WARN_ON(ret);
    837	nand_cleanup(chip);
    838
    839	return 0;
    840}
    841
    842static struct platform_driver nand_davinci_driver = {
    843	.probe		= nand_davinci_probe,
    844	.remove		= nand_davinci_remove,
    845	.driver		= {
    846		.name	= "davinci_nand",
    847		.of_match_table = of_match_ptr(davinci_nand_of_match),
    848	},
    849};
    850MODULE_ALIAS("platform:davinci_nand");
    851
    852module_platform_driver(nand_davinci_driver);
    853
    854MODULE_LICENSE("GPL");
    855MODULE_AUTHOR("Texas Instruments");
    856MODULE_DESCRIPTION("Davinci NAND flash driver");
    857