cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gpmi-nand.c (82849B)


      1// SPDX-License-Identifier: GPL-2.0+
      2/*
      3 * Freescale GPMI NAND Flash Driver
      4 *
      5 * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
      6 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
      7 */
      8#include <linux/clk.h>
      9#include <linux/delay.h>
     10#include <linux/slab.h>
     11#include <linux/sched/task_stack.h>
     12#include <linux/interrupt.h>
     13#include <linux/module.h>
     14#include <linux/mtd/partitions.h>
     15#include <linux/of.h>
     16#include <linux/of_device.h>
     17#include <linux/pm_runtime.h>
     18#include <linux/dma/mxs-dma.h>
     19#include "gpmi-nand.h"
     20#include "gpmi-regs.h"
     21#include "bch-regs.h"
     22
     23/* Resource names for the GPMI NAND driver. */
     24#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand"
     25#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
     26#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"
     27
     28/* Converts time to clock cycles */
     29#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
     30
     31#define MXS_SET_ADDR		0x4
     32#define MXS_CLR_ADDR		0x8
     33/*
     34 * Clear the bit and poll until it reads back as cleared.  This is
     35 * usually called with a reset address and a mask of either SFTRST
     36 * (bit 31) or CLKGATE (bit 30).
     37 */
     38static int clear_poll_bit(void __iomem *addr, u32 mask)
     39{
     40	int timeout = 0x400;
     41
     42	/* clear the bit */
     43	writel(mask, addr + MXS_CLR_ADDR);
     44
     45	/*
     46	 * SFTRST needs 3 GPMI clocks to settle; the reference manual
     47	 * recommends waiting 1us.
     48	 */
     49	udelay(1);
     50
     51	/* poll the bit becoming clear */
     52	while ((readl(addr) & mask) && --timeout)
     53		/* nothing */;
     54
     55	return !timeout;
     56}
     57
     58#define MODULE_CLKGATE		(1 << 30)
     59#define MODULE_SFTRST		(1 << 31)
     60/*
     61 * The current mxs_reset_block() will do two things:
     62 *  [1] enable the module.
     63 *  [2] reset the module.
     64 *
     65 * In most cases this is fine.
     66 * But on the MX23 there is a hardware bug in the BCH block (see erratum #2847):
     67 * if you try to soft reset the BCH block, it becomes unusable until
     68 * the next hard reset. This case occurs in NAND boot mode: when the board
     69 * boots from NAND, the on-chip ROM initializes the BCH block itself.
     70 * So if the driver then tries to reset the BCH again, the BCH stops working
     71 * and you will see a DMA timeout. The bug has been fixed
     72 * in later chips such as the MX28.
     73 *
     74 * To avoid this bug, we add a new parameter `just_enable` to
     75 * mxs_reset_block() and reimplement it here.
     76 */
     77static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
     78{
     79	int ret;
     80	int timeout = 0x400;
     81
     82	/* clear and poll SFTRST */
     83	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
     84	if (unlikely(ret))
     85		goto error;
     86
     87	/* clear CLKGATE */
     88	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
     89
     90	if (!just_enable) {
     91		/* set SFTRST to reset the block */
     92		writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
     93		udelay(1);
     94
     95		/* poll CLKGATE becoming set */
     96		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
     97			/* nothing */;
     98		if (unlikely(!timeout))
     99			goto error;
    100	}
    101
    102	/* clear and poll SFTRST */
    103	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
    104	if (unlikely(ret))
    105		goto error;
    106
    107	/* clear and poll CLKGATE */
    108	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
    109	if (unlikely(ret))
    110		goto error;
    111
    112	return 0;
    113
    114error:
    115	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
    116	return -ETIMEDOUT;
    117}
    118
    119static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
    120{
    121	struct clk *clk;
    122	int ret;
    123	int i;
    124
    125	for (i = 0; i < GPMI_CLK_MAX; i++) {
    126		clk = this->resources.clock[i];
    127		if (!clk)
    128			break;
    129
    130		if (v) {
    131			ret = clk_prepare_enable(clk);
    132			if (ret)
    133				goto err_clk;
    134		} else {
    135			clk_disable_unprepare(clk);
    136		}
    137	}
    138	return 0;
    139
    140err_clk:
    141	for (; i > 0; i--)
    142		clk_disable_unprepare(this->resources.clock[i - 1]);
    143	return ret;
    144}
    145
    146static int gpmi_init(struct gpmi_nand_data *this)
    147{
    148	struct resources *r = &this->resources;
    149	int ret;
    150
    151	ret = pm_runtime_get_sync(this->dev);
    152	if (ret < 0) {
    153		pm_runtime_put_noidle(this->dev);
    154		return ret;
    155	}
    156
    157	ret = gpmi_reset_block(r->gpmi_regs, false);
    158	if (ret)
    159		goto err_out;
    160
    161	/*
    162	 * Reset BCH here, too. We got failures otherwise :(
    163	 * See later BCH reset for explanation of MX23 and MX28 handling
    164	 */
    165	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
    166	if (ret)
    167		goto err_out;
    168
    169	/* Choose NAND mode. */
    170	writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
    171
    172	/* Set the IRQ polarity. */
    173	writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
    174				r->gpmi_regs + HW_GPMI_CTRL1_SET);
    175
    176	/* Disable Write-Protection. */
    177	writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
    178
    179	/* Select BCH ECC. */
    180	writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
    181
    182	/*
    183	 * Decouple the chip select from the dma channel. We use dma0 for all
    184	 * the chips, so force all NAND RDY_BUSY inputs to be sourced from
    185	 * RDY_BUSY0.
    186	 */
    187	writel(BM_GPMI_CTRL1_DECOUPLE_CS | BM_GPMI_CTRL1_GANGED_RDYBUSY,
    188	       r->gpmi_regs + HW_GPMI_CTRL1_SET);
    189
    190err_out:
    191	pm_runtime_mark_last_busy(this->dev);
    192	pm_runtime_put_autosuspend(this->dev);
    193	return ret;
    194}
    195
    196/* This function is very useful for debugging. It is only called when a bug occurs. */
    197static void gpmi_dump_info(struct gpmi_nand_data *this)
    198{
    199	struct resources *r = &this->resources;
    200	struct bch_geometry *geo = &this->bch_geometry;
    201	u32 reg;
    202	int i;
    203
    204	dev_err(this->dev, "Show GPMI registers :\n");
    205	for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
    206		reg = readl(r->gpmi_regs + i * 0x10);
    207		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
    208	}
    209
    210	/* start to print out the BCH info */
    211	dev_err(this->dev, "Show BCH registers :\n");
    212	for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
    213		reg = readl(r->bch_regs + i * 0x10);
    214		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
    215	}
    216	dev_err(this->dev, "BCH Geometry :\n"
    217		"GF length              : %u\n"
    218		"ECC Strength           : %u\n"
    219		"Page Size in Bytes     : %u\n"
    220		"Metadata Size in Bytes : %u\n"
    221		"ECC0 Chunk Size in Bytes: %u\n"
    222		"ECCn Chunk Size in Bytes: %u\n"
    223		"ECC Chunk Count        : %u\n"
    224		"Payload Size in Bytes  : %u\n"
    225		"Auxiliary Size in Bytes: %u\n"
    226		"Auxiliary Status Offset: %u\n"
    227		"Block Mark Byte Offset : %u\n"
    228		"Block Mark Bit Offset  : %u\n",
    229		geo->gf_len,
    230		geo->ecc_strength,
    231		geo->page_size,
    232		geo->metadata_size,
    233		geo->ecc0_chunk_size,
    234		geo->eccn_chunk_size,
    235		geo->ecc_chunk_count,
    236		geo->payload_size,
    237		geo->auxiliary_size,
    238		geo->auxiliary_status_offset,
    239		geo->block_mark_byte_offset,
    240		geo->block_mark_bit_offset);
    241}
    242
    243static bool gpmi_check_ecc(struct gpmi_nand_data *this)
    244{
    245	struct nand_chip *chip = &this->nand;
    246	struct bch_geometry *geo = &this->bch_geometry;
    247	struct nand_device *nand = &chip->base;
    248	struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
    249
    250	conf->step_size = geo->eccn_chunk_size;
    251	conf->strength = geo->ecc_strength;
    252
    253	/* Do the sanity check. */
    254	if (GPMI_IS_MXS(this)) {
    255		/* The mx23/mx28 only support the GF13. */
    256		if (geo->gf_len == 14)
    257			return false;
    258	}
    259
    260	if (geo->ecc_strength > this->devdata->bch_max_ecc_strength)
    261		return false;
    262
    263	if (!nand_ecc_is_strong_enough(nand))
    264		return false;
    265
    266	return true;
    267}
    268
    269/* check whether the bbm lies in a data chunk rather than an ecc chunk */
    270static bool bbm_in_data_chunk(struct gpmi_nand_data *this,
    271			unsigned int *chunk_num)
    272{
    273	struct bch_geometry *geo = &this->bch_geometry;
    274	struct nand_chip *chip = &this->nand;
    275	struct mtd_info *mtd = nand_to_mtd(chip);
    276	unsigned int i, j;
    277
    278	if (geo->ecc0_chunk_size != geo->eccn_chunk_size) {
    279		dev_err(this->dev,
    280			"The size of ecc0_chunk must equal that of eccn_chunk\n");
    281		return false;
    282	}
    283
    284	i = (mtd->writesize * 8 - geo->metadata_size * 8) /
    285		(geo->gf_len * geo->ecc_strength +
    286			geo->eccn_chunk_size * 8);
    287
    288	j = (mtd->writesize * 8 - geo->metadata_size * 8) -
    289		(geo->gf_len * geo->ecc_strength +
    290			geo->eccn_chunk_size * 8) * i;
    291
    292	if (j < geo->eccn_chunk_size * 8) {
    293		*chunk_num = i+1;
    294		dev_dbg(this->dev, "Set ecc to %d and bbm in chunk %d\n",
    295				geo->ecc_strength, *chunk_num);
    296		return true;
    297	}
    298
    299	return false;
    300}
    301
    302/*
    303 * If we can get the ECC information from the nand chip, we do not
    304 * need to calculate it ourselves.
    305 *
    306 * We may have spare oob space available in this case.
    307 */
    308static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
    309				    unsigned int ecc_strength,
    310				    unsigned int ecc_step)
    311{
    312	struct bch_geometry *geo = &this->bch_geometry;
    313	struct nand_chip *chip = &this->nand;
    314	struct mtd_info *mtd = nand_to_mtd(chip);
    315	unsigned int block_mark_bit_offset;
    316
    317	switch (ecc_step) {
    318	case SZ_512:
    319		geo->gf_len = 13;
    320		break;
    321	case SZ_1K:
    322		geo->gf_len = 14;
    323		break;
    324	default:
    325		dev_err(this->dev,
    326			"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
    327			nanddev_get_ecc_requirements(&chip->base)->strength,
    328			nanddev_get_ecc_requirements(&chip->base)->step_size);
    329		return -EINVAL;
    330	}
    331	geo->ecc0_chunk_size = ecc_step;
    332	geo->eccn_chunk_size = ecc_step;
    333	geo->ecc_strength = round_up(ecc_strength, 2);
    334	if (!gpmi_check_ecc(this))
    335		return -EINVAL;
    336
    337	/* Keep the C >= O */
    338	if (geo->eccn_chunk_size < mtd->oobsize) {
    339		dev_err(this->dev,
    340			"unsupported nand chip. ecc size: %d, oob size : %d\n",
    341			ecc_step, mtd->oobsize);
    342		return -EINVAL;
    343	}
    344
    345	/* The default value, see comment in the legacy_set_geometry(). */
    346	geo->metadata_size = 10;
    347
    348	geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size;
    349
    350	/*
    351	 * A NAND chip with a 2K page (512-byte data chunks) is shown below:
    352	 *
    353	 *    |                          P                            |
    354	 *    |<----------------------------------------------------->|
    355	 *    |                                                       |
    356	 *    |                                        (Block Mark)   |
    357	 *    |                      P'                      |      | |     |
    358	 *    |<-------------------------------------------->|  D   | |  O' |
    359	 *    |                                              |<---->| |<--->|
    360	 *    V                                              V      V V     V
    361	 *    +---+----------+-+----------+-+----------+-+----------+-+-----+
    362	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|     |
    363	 *    +---+----------+-+----------+-+----------+-+----------+-+-----+
    364	 *                                                   ^              ^
    365	 *                                                   |      O       |
    366	 *                                                   |<------------>|
    367	 *                                                   |              |
    368	 *
    369	 *	P : the page size for BCH module.
    370	 *	E : The ECC strength.
    371	 *	G : the length of Galois Field.
    372	 *	N : the number of chunks per page.
    373	 *	M : the metadata size per page.
    374	 *	C : the ecc chunk size, aka the "data" above.
    375	 *	P': the nand chip's page size.
    376	 *	O : the nand chip's oob size.
    377	 *	O': the free oob.
    378	 *
    379	 *	The formula for P is :
    380	 *
    381	 *	            E * G * N
    382	 *	       P = ------------ + P' + M
    383	 *                      8
    384	 *
    385	 * The position of block mark moves forward in the ECC-based view
    386	 * of page, and the delta is:
    387	 *
    388	 *                   E * G * (N - 1)
    389	 *             D = (---------------- + M)
    390	 *                          8
    391	 *
    392	 * Please see the comment in legacy_set_geometry().
    393	 * With the condition C >= O, we still get the same result.
    394	 * So the bit position of the physical block mark within the ECC-based
    395	 * view of the page is :
    396	 *             (P' - D) * 8
    397	 */
    398	geo->page_size = mtd->writesize + geo->metadata_size +
    399		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
    400
    401	geo->payload_size = mtd->writesize;
    402
    403	geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
    404	geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
    405				+ ALIGN(geo->ecc_chunk_count, 4);
    406
    407	if (!this->swap_block_mark)
    408		return 0;
    409
    410	/* For bit swap. */
    411	block_mark_bit_offset = mtd->writesize * 8 -
    412		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
    413				+ geo->metadata_size * 8);
    414
    415	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
    416	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
    417	return 0;
    418}
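/*
 * Worked example (illustrative only, not taken from any datasheet): for a
 * hypothetical 2KiB-page NAND with a 64-byte OOB that reports an 8-bit/512-byte
 * ECC requirement, the code above yields gf_len = 13, chunk size = 512,
 * ecc_strength = 8 and ecc_chunk_count = 4, so
 *
 *     page_size = 2048 + 10 + (13 * 8 * 4) / 8 = 2110 bytes
 *
 * and, with block mark swapping enabled,
 *
 *     block_mark_bit_offset = 2048 * 8 - (8 * 13 * 3 + 10 * 8) = 15992
 *
 * i.e. a block mark byte offset of 1999 and a bit offset of 0 within the
 * ECC-based view of the page.
 */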
    419
    420/*
    421 *  Calculate the ECC strength by hand:
    422 *	E : The ECC strength.
    423 *	G : the length of Galois Field.
    424 *	N : the number of chunks per page.
    425 *	O : the oobsize of the NAND chip.
    426 *	M : the metadata size per page.
    427 *
    428 *	The formula is :
    429 *		E * G * N
    430 *	      ------------ <= (O - M)
    431 *                  8
    432 *
    433 *      So, we get E by:
    434 *                    (O - M) * 8
    435 *              E <= -------------
    436 *                       G * N
    437 */
    438static inline int get_ecc_strength(struct gpmi_nand_data *this)
    439{
    440	struct bch_geometry *geo = &this->bch_geometry;
    441	struct mtd_info	*mtd = nand_to_mtd(&this->nand);
    442	int ecc_strength;
    443
    444	ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
    445			/ (geo->gf_len * geo->ecc_chunk_count);
    446
    447	/* Round the ecc strength down to an even number. */
    448	return round_down(ecc_strength, 2);
    449}
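/*
 * Worked example (illustrative only): for a hypothetical 2KiB-page NAND with
 * a 64-byte OOB, 512-byte chunks and GF13 (O = 64, M = 10, G = 13, N = 4),
 * the formula above gives
 *
 *     E <= (64 - 10) * 8 / (13 * 4) = 432 / 52 ~= 8.3
 *
 * and round_down(8, 2) leaves an ECC strength of 8 bits per 512-byte chunk.
 */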
    450
    451static int set_geometry_for_large_oob(struct gpmi_nand_data *this)
    452{
    453	struct bch_geometry *geo = &this->bch_geometry;
    454	struct nand_chip *chip = &this->nand;
    455	struct mtd_info *mtd = nand_to_mtd(chip);
    456	const struct nand_ecc_props *requirements =
    457		nanddev_get_ecc_requirements(&chip->base);
    458	unsigned int block_mark_bit_offset;
    459	unsigned int max_ecc;
    460	unsigned int bbm_chunk;
    461	unsigned int i;
    462
    463	/* sanity check for the minimum ecc strength the NAND requires */
    464	if (!(requirements->strength > 0 &&
    465	      requirements->step_size > 0))
    466		return -EINVAL;
    467	geo->ecc_strength = requirements->strength;
    468
    469	/* check if platform can support this nand */
    470	if (!gpmi_check_ecc(this)) {
    471		dev_err(this->dev,
    472			"unsupported NAND chip, minimum ecc required %d\n",
    473			geo->ecc_strength);
    474		return -EINVAL;
    475	}
    476
    477	/* calculate the maximum ecc strength the platform can support */
    478	geo->metadata_size = 10;
    479	geo->gf_len = 14;
    480	geo->ecc0_chunk_size = 1024;
    481	geo->eccn_chunk_size = 1024;
    482	geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size;
    483	max_ecc = min(get_ecc_strength(this),
    484		      this->devdata->bch_max_ecc_strength);
    485
    486	/*
    487	 * search for a supported ecc strength that places the bbm
    488	 * in a data chunk
    489	 */
    490	geo->ecc_strength = max_ecc;
    491	while (!(geo->ecc_strength < requirements->strength)) {
    492		if (bbm_in_data_chunk(this, &bbm_chunk))
    493			goto geo_setting;
    494		geo->ecc_strength -= 2;
    495	}
    496
    497	/* If none of them works, keep using the minimum ecc strength the NAND */
    498	/* requires, but change the ecc page layout. */
    499	geo->ecc_strength = requirements->strength;
    500	/* add extra ecc for meta data */
    501	geo->ecc0_chunk_size = 0;
    502	geo->ecc_chunk_count = (mtd->writesize / geo->eccn_chunk_size) + 1;
    503	geo->ecc_for_meta = 1;
    504	/* check whether the oob can accommodate this extra ecc chunk */
    505	if (mtd->oobsize * 8 < geo->metadata_size * 8 +
    506	    geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) {
    507		dev_err(this->dev, "unsupported NAND chip with new layout\n");
    508		return -EINVAL;
    509	}
    510
    511	/* calculate which chunk the bbm is located in */
    512	bbm_chunk = (mtd->writesize * 8 - geo->metadata_size * 8 -
    513		     geo->gf_len * geo->ecc_strength) /
    514		     (geo->gf_len * geo->ecc_strength +
    515		     geo->eccn_chunk_size * 8) + 1;
    516
    517geo_setting:
    518
    519	geo->page_size = mtd->writesize + geo->metadata_size +
    520		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
    521	geo->payload_size = mtd->writesize;
    522
    523	/*
    524	 * The auxiliary buffer contains the metadata and the ECC status. The
    525	 * metadata is padded to the nearest 32-bit boundary. The ECC status
    526	 * contains one byte for every ECC chunk, and is also padded to the
    527	 * nearest 32-bit boundary.
    528	 */
    529	geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
    530	geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
    531				    + ALIGN(geo->ecc_chunk_count, 4);
    532
    533	if (!this->swap_block_mark)
    534		return 0;
    535
    536	/* calculate the number of ecc chunks behind the bbm */
    537	i = (mtd->writesize / geo->eccn_chunk_size) - bbm_chunk + 1;
    538
    539	block_mark_bit_offset = mtd->writesize * 8 -
    540		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
    541		+ geo->metadata_size * 8);
    542
    543	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
    544	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
    545
    546	dev_dbg(this->dev, "BCH Geometry :\n"
    547		"GF length              : %u\n"
    548		"ECC Strength           : %u\n"
    549		"Page Size in Bytes     : %u\n"
    550		"Metadata Size in Bytes : %u\n"
    551		"ECC0 Chunk Size in Bytes: %u\n"
    552		"ECCn Chunk Size in Bytes: %u\n"
    553		"ECC Chunk Count        : %u\n"
    554		"Payload Size in Bytes  : %u\n"
    555		"Auxiliary Size in Bytes: %u\n"
    556		"Auxiliary Status Offset: %u\n"
    557		"Block Mark Byte Offset : %u\n"
    558		"Block Mark Bit Offset  : %u\n"
    559		"Block Mark in chunk	: %u\n"
    560		"Ecc for Meta data	: %u\n",
    561		geo->gf_len,
    562		geo->ecc_strength,
    563		geo->page_size,
    564		geo->metadata_size,
    565		geo->ecc0_chunk_size,
    566		geo->eccn_chunk_size,
    567		geo->ecc_chunk_count,
    568		geo->payload_size,
    569		geo->auxiliary_size,
    570		geo->auxiliary_status_offset,
    571		geo->block_mark_byte_offset,
    572		geo->block_mark_bit_offset,
    573		bbm_chunk,
    574		geo->ecc_for_meta);
    575
    576	return 0;
    577}
    578
    579static int legacy_set_geometry(struct gpmi_nand_data *this)
    580{
    581	struct bch_geometry *geo = &this->bch_geometry;
    582	struct mtd_info *mtd = nand_to_mtd(&this->nand);
    583	unsigned int metadata_size;
    584	unsigned int status_size;
    585	unsigned int block_mark_bit_offset;
    586
    587	/*
    588	 * The size of the metadata can be changed, though we set it to 10
    589	 * bytes now. But it can't be too large, because we have to save
    590	 * enough space for BCH.
    591	 */
    592	geo->metadata_size = 10;
    593
    594	/* The default for the length of Galois Field. */
    595	geo->gf_len = 13;
    596
    597	/* The default for chunk size. */
    598	geo->ecc0_chunk_size = 512;
    599	geo->eccn_chunk_size = 512;
    600	while (geo->eccn_chunk_size < mtd->oobsize) {
    601		geo->ecc0_chunk_size *= 2; /* keep C >= O */
    602		geo->eccn_chunk_size *= 2; /* keep C >= O */
    603		geo->gf_len = 14;
    604	}
    605
    606	geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size;
    607
    608	/* We use the same ECC strength for all chunks. */
    609	geo->ecc_strength = get_ecc_strength(this);
    610	if (!gpmi_check_ecc(this)) {
    611		dev_err(this->dev,
    612			"ecc strength: %d cannot be supported by the controller (%d)\n"
    613			"try to use minimum ecc strength that NAND chip required\n",
    614			geo->ecc_strength,
    615			this->devdata->bch_max_ecc_strength);
    616		return -EINVAL;
    617	}
    618
    619	geo->page_size = mtd->writesize + geo->metadata_size +
    620		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
    621	geo->payload_size = mtd->writesize;
    622
    623	/*
    624	 * The auxiliary buffer contains the metadata and the ECC status. The
    625	 * metadata is padded to the nearest 32-bit boundary. The ECC status
    626	 * contains one byte for every ECC chunk, and is also padded to the
    627	 * nearest 32-bit boundary.
    628	 */
    629	metadata_size = ALIGN(geo->metadata_size, 4);
    630	status_size   = ALIGN(geo->ecc_chunk_count, 4);
    631
    632	geo->auxiliary_size = metadata_size + status_size;
    633	geo->auxiliary_status_offset = metadata_size;
    634
    635	if (!this->swap_block_mark)
    636		return 0;
    637
    638	/*
    639	 * We need to compute the byte and bit offsets of
    640	 * the physical block mark within the ECC-based view of the page.
    641	 *
    642	 * A NAND chip with a 2K page is shown below:
    643	 *                                             (Block Mark)
    644	 *                                                   |      |
    645	 *                                                   |  D   |
    646	 *                                                   |<---->|
    647	 *                                                   V      V
    648	 *    +---+----------+-+----------+-+----------+-+----------+-+
    649	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
    650	 *    +---+----------+-+----------+-+----------+-+----------+-+
    651	 *
    652	 * The position of block mark moves forward in the ECC-based view
    653	 * of page, and the delta is:
    654	 *
    655	 *                   E * G * (N - 1)
    656	 *             D = (---------------- + M)
    657	 *                          8
    658	 *
    659	 * With the formula to compute the ECC strength, and the condition
    660	 *       : C >= O         (C is the ecc chunk size)
    661	 *
    662	 * It's easy to deduce the following result:
    663	 *
    664	 *         E * G       (O - M)      C - M         C - M
    665	 *      ----------- <= ------- <=  --------  <  ---------
    666	 *           8            N           N          (N - 1)
    667	 *
    668	 *  So, we get:
    669	 *
    670	 *                   E * G * (N - 1)
    671	 *             D = (---------------- + M) < C
    672	 *                          8
    673	 *
    674	 *  The above inequality means the position of block mark
    675	 *  within the ECC-based view of the page is still in the data chunk,
    676	 *  and it's NOT in the ECC bits of the chunk.
    677	 *
    678	 *  Use the following to compute the bit position of the
    679	 *  physical block mark within the ECC-based view of the page:
    680	 *          (page_size - D) * 8
    681	 *
    682	 *  --Huang Shijie
    683	 */
    684	block_mark_bit_offset = mtd->writesize * 8 -
    685		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
    686				+ geo->metadata_size * 8);
    687
    688	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
    689	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
    690	return 0;
    691}
    692
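/*
 * Summary of the selection logic below: the legacy geometry is tried first
 * when "fsl,use-minimum-ecc" is absent and the OOB is smaller than 1024 bytes
 * (or when the chip reports no ECC requirements); chips with more than 1024
 * bytes of OOB then try the large-OOB geometry; anything left falls back to
 * the geometry derived from the chip's reported ECC requirements.
 */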
    693static int common_nfc_set_geometry(struct gpmi_nand_data *this)
    694{
    695	struct nand_chip *chip = &this->nand;
    696	struct mtd_info *mtd = nand_to_mtd(&this->nand);
    697	const struct nand_ecc_props *requirements =
    698		nanddev_get_ecc_requirements(&chip->base);
    699	bool use_minimum_ecc;
    700	int err;
    701
    702	use_minimum_ecc = of_property_read_bool(this->dev->of_node,
    703						"fsl,use-minimum-ecc");
    704
    705	/* use legacy bch geometry settings by default */
    706	if ((!use_minimum_ecc && mtd->oobsize < 1024) ||
    707	    !(requirements->strength > 0 && requirements->step_size > 0)) {
    708		dev_dbg(this->dev, "use legacy bch geometry\n");
    709		err = legacy_set_geometry(this);
    710		if (!err)
    711			return 0;
    712	}
    713
    714	/* for large oob nand */
    715	if (mtd->oobsize > 1024) {
    716		dev_dbg(this->dev, "use large oob bch geometry\n");
    717		err = set_geometry_for_large_oob(this);
    718		if (!err)
    719			return 0;
    720	}
    721
    722	/* otherwise use the minimum ecc nand chip required */
    723	dev_dbg(this->dev, "use minimum ecc bch geometry\n");
    724	err = set_geometry_by_ecc_info(this, requirements->strength,
    725					requirements->step_size);
    726	if (err)
    727		dev_err(this->dev, "none of the bch geometry setting works\n");
    728
    729	return err;
    730}
    731
    732/* Configures the geometry for BCH.  */
    733static int bch_set_geometry(struct gpmi_nand_data *this)
    734{
    735	struct resources *r = &this->resources;
    736	int ret;
    737
    738	ret = common_nfc_set_geometry(this);
    739	if (ret)
    740		return ret;
    741
    742	ret = pm_runtime_get_sync(this->dev);
    743	if (ret < 0) {
    744		pm_runtime_put_autosuspend(this->dev);
    745		return ret;
    746	}
    747
    748	/*
    749	 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
    750	 * chip, otherwise it will lock up. So we skip resetting the BCH on the
    751	 * MX23 and MX28.
    752	 */
    753	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
    754	if (ret)
    755		goto err_out;
    756
    757	/* Set *all* chip selects to use layout 0. */
    758	writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
    759
    760	ret = 0;
    761err_out:
    762	pm_runtime_mark_last_busy(this->dev);
    763	pm_runtime_put_autosuspend(this->dev);
    764
    765	return ret;
    766}
    767
    768/*
    769 * <1> First, we should know what the GPMI clock means.
    770 *     The GPMI-clock is the internal clock in the gpmi nand controller.
    771 *     If you set the gpmi nand controller to 100MHz, the GPMI-clock's period
    772 *     is 10ns. Denote the GPMI-clock's period as GPMI-clock-period.
    773 *
    774 * <2> Second, we should know the frequency on the nand chip pins.
    775 *     The frequency on the nand chip pins is derived from the GPMI-clock.
    776 *     We can get it from the following equation:
    777 *
    778 *         F = G / (DS + DH)
    779 *
    780 *         F  : the frequency on the nand chip pins.
    781 *         G  : the GPMI clock, such as 100MHz.
    782 *         DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
    783 *         DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
    784 *
    785 * <3> Third, when the frequency on the nand chip pins is above 33MHz,
    786 *     the nand EDO (Extended Data Out) timing can be applied.
    787 *     The GPMI implements a feedback read strobe to sample the read data.
    788 *     The feedback read strobe can be delayed to support the nand EDO timing
    789 *     where the read strobe may deassert before the read data is valid, and
    790 *     the read data remains valid for some time after the read strobe.
    791 *
    792 *     The following figure illustrates some aspects of a NAND Flash read:
    793 *
    794 *                   |<---tREA---->|
    795 *                   |             |
    796 *                   |         |   |
    797 *                   |<--tRP-->|   |
    798 *                   |         |   |
    799 *                  __          ___|__________________________________
    800 *     RDN            \________/   |
    801 *                                 |
    802 *                                 /---------\
    803 *     Read Data    --------------<           >---------
    804 *                                 \---------/
    805 *                                |     |
    806 *                                |<-D->|
    807 *     FeedbackRDN  ________             ____________
    808 *                          \___________/
    809 *
    810 *          D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
    811 *
    812 *
    813 * <4> Now, we begin to describe how to compute the right RDN_DELAY.
    814 *
    815 *  4.1) From the aspect of the nand chip pins:
    816 *        Delay = (tREA + C - tRP)               {1}
    817 *
    818 *        tREA : the maximum read access time.
    819 *        C    : a constant to adjust the delay. The default is 4000ps.
    820 *        tRP  : the read pulse width, which is exactly:
    821 *                   tRP = (GPMI-clock-period) * DATA_SETUP
    822 *
    823 *  4.2) From the aspect of the GPMI nand controller:
    824 *         Delay = RDN_DELAY * 0.125 * RP        {2}
    825 *
    826 *         RP   : the DLL reference period.
    827 *            if (GPMI-clock-period > DLL_THRETHOLD)
    828 *                   RP = GPMI-clock-period / 2;
    829 *            else
    830 *                   RP = GPMI-clock-period;
    831 *
    832 *            Set HW_GPMI_CTRL1:HALF_PERIOD if the GPMI-clock-period
    833 *            is greater than DLL_THRESHOLD. On other SoCs, the DLL_THRESHOLD
    834 *            is 16000ps, but on the mx6q we use 12000ps.
    835 *
    836 *  4.3) since {1} equals {2}, we get:
    837 *
    838 *                     (tREA + 4000 - tRP) * 8
    839 *         RDN_DELAY = -----------------------     {3}
    840 *                           RP
    841 */
    842static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
    843				    const struct nand_sdr_timings *sdr)
    844{
    845	struct gpmi_nfc_hardware_timing *hw = &this->hw;
    846	struct resources *r = &this->resources;
    847	unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
    848	unsigned int period_ps, reference_period_ps;
    849	unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
    850	unsigned int tRP_ps;
    851	bool use_half_period;
    852	int sample_delay_ps, sample_delay_factor;
    853	u16 busy_timeout_cycles;
    854	u8 wrn_dly_sel;
    855	unsigned long clk_rate, min_rate;
    856
    857	if (sdr->tRC_min >= 30000) {
    858		/* ONFI non-EDO modes [0-3] */
    859		hw->clk_rate = 22000000;
    860		min_rate = 0;
    861		wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
    862	} else if (sdr->tRC_min >= 25000) {
    863		/* ONFI EDO mode 4 */
    864		hw->clk_rate = 80000000;
    865		min_rate = 22000000;
    866		wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
    867	} else {
    868		/* ONFI EDO mode 5 */
    869		hw->clk_rate = 100000000;
    870		min_rate = 80000000;
    871		wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
    872	}
    873
    874	clk_rate = clk_round_rate(r->clock[0], hw->clk_rate);
    875	if (clk_rate <= min_rate) {
    876		dev_err(this->dev, "clock setting: expected %ld, got %ld\n",
    877			hw->clk_rate, clk_rate);
    878		return -ENOTSUPP;
    879	}
    880
    881	hw->clk_rate = clk_rate;
    882	/* SDR core timings are given in picoseconds */
    883	period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
    884
    885	addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
    886	data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
    887	data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
    888	busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
    889
    890	hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
    891		      BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
    892		      BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
    893	hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles, 4096));
    894
    895	/*
    896	 * Derive NFC ideal delay from {3}:
    897	 *
    898	 *                     (tREA + 4000 - tRP) * 8
    899	 *         RDN_DELAY = -----------------------
    900	 *                                RP
    901	 */
    902	if (period_ps > dll_threshold_ps) {
    903		use_half_period = true;
    904		reference_period_ps = period_ps / 2;
    905	} else {
    906		use_half_period = false;
    907		reference_period_ps = period_ps;
    908	}
    909
    910	tRP_ps = data_setup_cycles * period_ps;
    911	sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
    912	if (sample_delay_ps > 0)
    913		sample_delay_factor = sample_delay_ps / reference_period_ps;
    914	else
    915		sample_delay_factor = 0;
    916
    917	hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
    918	if (sample_delay_factor)
    919		hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
    920			      BM_GPMI_CTRL1_DLL_ENABLE |
    921			      (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
    922	return 0;
    923}
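/*
 * Worked example for formula {3} above (hypothetical numbers, for
 * illustration only): with tREA = 16000ps, a 100MHz GPMI clock
 * (period = 10000ps, below the 12000ps DLL threshold of the mx6q, so
 * RP = 10000ps) and DATA_SETUP = 1 cycle (tRP = 10000ps):
 *
 *     RDN_DELAY = (16000 + 4000 - 10000) * 8 / 10000 = 8
 */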
    924
    925static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
    926{
    927	struct gpmi_nfc_hardware_timing *hw = &this->hw;
    928	struct resources *r = &this->resources;
    929	void __iomem *gpmi_regs = r->gpmi_regs;
    930	unsigned int dll_wait_time_us;
    931	int ret;
    932
    933	/* Clock dividers do NOT guarantee a clean clock signal on their outputs
    934	 * while the divide factor is being changed on i.MX6Q/UL/SX. On i.MX7/8,
    935	 * all clock dividers provide this guarantee.
    936	 */
    937	if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this))
    938		clk_disable_unprepare(r->clock[0]);
    939
    940	ret = clk_set_rate(r->clock[0], hw->clk_rate);
    941	if (ret) {
    942		dev_err(this->dev, "cannot set clock rate to %lu Hz: %d\n", hw->clk_rate, ret);
    943		return ret;
    944	}
    945
    946	if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) {
    947		ret = clk_prepare_enable(r->clock[0]);
    948		if (ret)
    949			return ret;
    950	}
    951
    952	writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
    953	writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
    954
    955	/*
    956	 * Clear several CTRL1 fields; the DLL must be disabled when setting
    957	 * RDN_DELAY or HALF_PERIOD.
    958	 */
    959	writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
    960	writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);
    961
    962	/* Wait 64 clock cycles before using the GPMI after enabling the DLL */
    963	dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
    964	if (!dll_wait_time_us)
    965		dll_wait_time_us = 1;
    966
    967	/* Wait for the DLL to settle. */
    968	udelay(dll_wait_time_us);
    969
    970	return 0;
    971}
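/*
 * Note on the DLL settle time above: with integer arithmetic,
 * USEC_PER_SEC / hw->clk_rate is 0 for any clock faster than 1MHz, so the
 * expression evaluates to 0 and the fallback of 1us applies. That is still
 * enough, since 64 cycles at e.g. 100MHz only take 0.64us.
 */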
    972
    973static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
    974				const struct nand_interface_config *conf)
    975{
    976	struct gpmi_nand_data *this = nand_get_controller_data(chip);
    977	const struct nand_sdr_timings *sdr;
    978	int ret;
    979
    980	/* Retrieve required NAND timings */
    981	sdr = nand_get_sdr_timings(conf);
    982	if (IS_ERR(sdr))
    983		return PTR_ERR(sdr);
    984
    985	/* Only MX28/MX6 GPMI controller can reach EDO timings */
    986	if (sdr->tRC_min <= 25000 && !GPMI_IS_MX28(this) && !GPMI_IS_MX6(this))
    987		return -ENOTSUPP;
    988
    989	/* Stop here if this call was just a check */
    990	if (chipnr < 0)
    991		return 0;
    992
    993	/* Do the actual derivation of the controller timings */
    994	ret = gpmi_nfc_compute_timings(this, sdr);
    995	if (ret)
    996		return ret;
    997
    998	this->hw.must_apply_timings = true;
    999
   1000	return 0;
   1001}
   1002
   1003/* Clears a BCH interrupt. */
   1004static void gpmi_clear_bch(struct gpmi_nand_data *this)
   1005{
   1006	struct resources *r = &this->resources;
   1007	writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
   1008}
   1009
   1010static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
   1011{
   1012	/* We use DMA channel 0 to access all the nand chips. */
   1013	return this->dma_chans[0];
   1014}
   1015
   1016/* This will be called after the DMA operation is finished. */
   1017static void dma_irq_callback(void *param)
   1018{
   1019	struct gpmi_nand_data *this = param;
   1020	struct completion *dma_c = &this->dma_done;
   1021
   1022	complete(dma_c);
   1023}
   1024
   1025static irqreturn_t bch_irq(int irq, void *cookie)
   1026{
   1027	struct gpmi_nand_data *this = cookie;
   1028
   1029	gpmi_clear_bch(this);
   1030	complete(&this->bch_done);
   1031	return IRQ_HANDLED;
   1032}
   1033
   1034static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
   1035{
   1036	/*
   1037	 * raw_len is the length to read/write including the bch data, which
   1038	 * is what we are passed in exec_op. Calculate the data length from it.
   1039	 */
   1040	if (this->bch)
   1041		return ALIGN_DOWN(raw_len, this->bch_geometry.eccn_chunk_size);
   1042	else
   1043		return raw_len;
   1044}
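/*
 * For example (continuing the hypothetical 2KiB-page geometry used earlier):
 * a full BCH page read passes raw_len = 2110 here, and ALIGN_DOWN(2110, 512)
 * recovers the 2048 bytes of payload data.
 */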
   1045
   1046/* Can we use the upper layer's buffer directly for DMA? */
   1047static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
   1048			     int raw_len, struct scatterlist *sgl,
   1049			     enum dma_data_direction dr)
   1050{
   1051	int ret;
   1052	int len = gpmi_raw_len_to_len(this, raw_len);
   1053
   1054	/* first try to map the upper buffer directly */
   1055	if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
   1056		sg_init_one(sgl, buf, len);
   1057		ret = dma_map_sg(this->dev, sgl, 1, dr);
   1058		if (ret == 0)
   1059			goto map_fail;
   1060
   1061		return true;
   1062	}
   1063
   1064map_fail:
   1065	/* We have to use our own DMA buffer. */
   1066	sg_init_one(sgl, this->data_buffer_dma, len);
   1067
   1068	if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
   1069		memcpy(this->data_buffer_dma, buf, len);
   1070
   1071	dma_map_sg(this->dev, sgl, 1, dr);
   1072
   1073	return false;
   1074}
   1075
   1076/* add our own bbt descriptor */
   1077static uint8_t scan_ff_pattern[] = { 0xff };
   1078static struct nand_bbt_descr gpmi_bbt_descr = {
   1079	.options	= 0,
   1080	.offs		= 0,
   1081	.len		= 1,
   1082	.pattern	= scan_ff_pattern
   1083};
   1084
   1085/*
   1086 * We may change the layout if we can get the ECC info from the datasheet,
   1087 * else we will use all the (page + OOB).
   1088 */
   1089static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
   1090			      struct mtd_oob_region *oobregion)
   1091{
   1092	struct nand_chip *chip = mtd_to_nand(mtd);
   1093	struct gpmi_nand_data *this = nand_get_controller_data(chip);
   1094	struct bch_geometry *geo = &this->bch_geometry;
   1095
   1096	if (section)
   1097		return -ERANGE;
   1098
   1099	oobregion->offset = 0;
   1100	oobregion->length = geo->page_size - mtd->writesize;
   1101
   1102	return 0;
   1103}
   1104
   1105static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
   1106			       struct mtd_oob_region *oobregion)
   1107{
   1108	struct nand_chip *chip = mtd_to_nand(mtd);
   1109	struct gpmi_nand_data *this = nand_get_controller_data(chip);
   1110	struct bch_geometry *geo = &this->bch_geometry;
   1111
   1112	if (section)
   1113		return -ERANGE;
   1114
   1115	/* The available oob size we have. */
   1116	if (geo->page_size < mtd->writesize + mtd->oobsize) {
   1117		oobregion->offset = geo->page_size - mtd->writesize;
   1118		oobregion->length = mtd->oobsize - oobregion->offset;
   1119	}
   1120
   1121	return 0;
   1122}
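/*
 * With the hypothetical 2110-byte BCH page size and 64-byte OOB from the
 * example above, the ECC OOB region is bytes 0..61 (2110 - 2048 = 62 bytes)
 * and the free OOB region is the remaining bytes 62..63.
 */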
   1123
   1124static const char * const gpmi_clks_for_mx2x[] = {
   1125	"gpmi_io",
   1126};
   1127
   1128static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
   1129	.ecc = gpmi_ooblayout_ecc,
   1130	.free = gpmi_ooblayout_free,
   1131};
   1132
   1133static const struct gpmi_devdata gpmi_devdata_imx23 = {
   1134	.type = IS_MX23,
   1135	.bch_max_ecc_strength = 20,
   1136	.max_chain_delay = 16000,
   1137	.clks = gpmi_clks_for_mx2x,
   1138	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
   1139};
   1140
   1141static const struct gpmi_devdata gpmi_devdata_imx28 = {
   1142	.type = IS_MX28,
   1143	.bch_max_ecc_strength = 20,
   1144	.max_chain_delay = 16000,
   1145	.clks = gpmi_clks_for_mx2x,
   1146	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
   1147};
   1148
   1149static const char * const gpmi_clks_for_mx6[] = {
   1150	"gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
   1151};
   1152
   1153static const struct gpmi_devdata gpmi_devdata_imx6q = {
   1154	.type = IS_MX6Q,
   1155	.bch_max_ecc_strength = 40,
   1156	.max_chain_delay = 12000,
   1157	.clks = gpmi_clks_for_mx6,
   1158	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
   1159};
   1160
   1161static const struct gpmi_devdata gpmi_devdata_imx6sx = {
   1162	.type = IS_MX6SX,
   1163	.bch_max_ecc_strength = 62,
   1164	.max_chain_delay = 12000,
   1165	.clks = gpmi_clks_for_mx6,
   1166	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
   1167};
   1168
   1169static const char * const gpmi_clks_for_mx7d[] = {
   1170	"gpmi_io", "gpmi_bch_apb",
   1171};
   1172
   1173static const struct gpmi_devdata gpmi_devdata_imx7d = {
   1174	.type = IS_MX7D,
   1175	.bch_max_ecc_strength = 62,
   1176	.max_chain_delay = 12000,
   1177	.clks = gpmi_clks_for_mx7d,
   1178	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
   1179};
   1180
   1181static int acquire_register_block(struct gpmi_nand_data *this,
   1182				  const char *res_name)
   1183{
   1184	struct platform_device *pdev = this->pdev;
   1185	struct resources *res = &this->resources;
   1186	void __iomem *p;
   1187
   1188	p = devm_platform_ioremap_resource_byname(pdev, res_name);
   1189	if (IS_ERR(p))
   1190		return PTR_ERR(p);
   1191
   1192	if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
   1193		res->gpmi_regs = p;
   1194	else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
   1195		res->bch_regs = p;
   1196	else
   1197		dev_err(this->dev, "unknown resource name : %s\n", res_name);
   1198
   1199	return 0;
   1200}
   1201
   1202static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
   1203{
   1204	struct platform_device *pdev = this->pdev;
   1205	const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
   1206	int err;
   1207
   1208	err = platform_get_irq_byname(pdev, res_name);
   1209	if (err < 0)
   1210		return err;
   1211
   1212	err = devm_request_irq(this->dev, err, irq_h, 0, res_name, this);
   1213	if (err)
   1214		dev_err(this->dev, "error requesting BCH IRQ\n");
   1215
   1216	return err;
   1217}
   1218
   1219static void release_dma_channels(struct gpmi_nand_data *this)
   1220{
   1221	unsigned int i;
   1222	for (i = 0; i < DMA_CHANS; i++)
   1223		if (this->dma_chans[i]) {
   1224			dma_release_channel(this->dma_chans[i]);
   1225			this->dma_chans[i] = NULL;
   1226		}
   1227}
   1228
   1229static int acquire_dma_channels(struct gpmi_nand_data *this)
   1230{
   1231	struct platform_device *pdev = this->pdev;
   1232	struct dma_chan *dma_chan;
   1233	int ret = 0;
   1234
   1235	/* request dma channel */
   1236	dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
   1237	if (IS_ERR(dma_chan)) {
   1238		ret = dev_err_probe(this->dev, PTR_ERR(dma_chan),
   1239				    "DMA channel request failed\n");
   1240		release_dma_channels(this);
   1241	} else {
   1242		this->dma_chans[0] = dma_chan;
   1243	}
   1244
   1245	return ret;
   1246}
   1247
   1248static int gpmi_get_clks(struct gpmi_nand_data *this)
   1249{
   1250	struct resources *r = &this->resources;
   1251	struct clk *clk;
   1252	int err, i;
   1253
   1254	for (i = 0; i < this->devdata->clks_count; i++) {
   1255		clk = devm_clk_get(this->dev, this->devdata->clks[i]);
   1256		if (IS_ERR(clk)) {
   1257			err = PTR_ERR(clk);
   1258			goto err_clock;
   1259		}
   1260
   1261		r->clock[i] = clk;
   1262	}
   1263
   1264	return 0;
   1265
   1266err_clock:
   1267	dev_dbg(this->dev, "failed in finding the clocks.\n");
   1268	return err;
   1269}
   1270
   1271static int acquire_resources(struct gpmi_nand_data *this)
   1272{
   1273	int ret;
   1274
   1275	ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
   1276	if (ret)
   1277		goto exit_regs;
   1278
   1279	ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
   1280	if (ret)
   1281		goto exit_regs;
   1282
   1283	ret = acquire_bch_irq(this, bch_irq);
   1284	if (ret)
   1285		goto exit_regs;
   1286
   1287	ret = acquire_dma_channels(this);
   1288	if (ret)
   1289		goto exit_regs;
   1290
   1291	ret = gpmi_get_clks(this);
   1292	if (ret)
   1293		goto exit_clock;
   1294	return 0;
   1295
   1296exit_clock:
   1297	release_dma_channels(this);
   1298exit_regs:
   1299	return ret;
   1300}
   1301
   1302static void release_resources(struct gpmi_nand_data *this)
   1303{
   1304	release_dma_channels(this);
   1305}
   1306
   1307static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
   1308{
   1309	struct device *dev = this->dev;
   1310	struct bch_geometry *geo = &this->bch_geometry;
   1311
   1312	if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
   1313		dma_free_coherent(dev, geo->auxiliary_size,
   1314					this->auxiliary_virt,
   1315					this->auxiliary_phys);
   1316	kfree(this->data_buffer_dma);
   1317	kfree(this->raw_buffer);
   1318
   1319	this->data_buffer_dma	= NULL;
   1320	this->raw_buffer	= NULL;
   1321}
   1322
   1323/* Allocate the DMA buffers */
   1324static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
   1325{
   1326	struct bch_geometry *geo = &this->bch_geometry;
   1327	struct device *dev = this->dev;
   1328	struct mtd_info *mtd = nand_to_mtd(&this->nand);
   1329
   1330	/*
   1331	 * Allocate a read/write data buffer.
   1332	 *     gpmi_alloc_dma_buffer() can be called twice.
   1333	 *     We allocate a PAGE_SIZE buffer if gpmi_alloc_dma_buffer()
   1334	 *     is called before NAND identification, and we allocate a
   1335	 *     buffer of the real NAND page size when gpmi_alloc_dma_buffer()
   1336	 *     is called afterwards.
   1337	 */
   1338	this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
   1339					GFP_DMA | GFP_KERNEL);
   1340	if (this->data_buffer_dma == NULL)
   1341		goto error_alloc;
   1342
   1343	this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
   1344					&this->auxiliary_phys, GFP_DMA);
   1345	if (!this->auxiliary_virt)
   1346		goto error_alloc;
   1347
   1348	this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
   1349	if (!this->raw_buffer)
   1350		goto error_alloc;
   1351
   1352	return 0;
   1353
   1354error_alloc:
   1355	gpmi_free_dma_buffer(this);
   1356	return -ENOMEM;
   1357}
   1358
   1359/*
   1360 * Handles block mark swapping.
   1361 * It can be called to swap the block mark or to swap it back,
   1362 * because the operations are the same.
   1363 */
   1364static void block_mark_swapping(struct gpmi_nand_data *this,
   1365				void *payload, void *auxiliary)
   1366{
   1367	struct bch_geometry *nfc_geo = &this->bch_geometry;
   1368	unsigned char *p;
   1369	unsigned char *a;
   1370	unsigned int  bit;
   1371	unsigned char mask;
   1372	unsigned char from_data;
   1373	unsigned char from_oob;
   1374
   1375	if (!this->swap_block_mark)
   1376		return;
   1377
   1378	/*
   1379	 * If control arrives here, we're swapping. Make some convenience
   1380	 * variables.
   1381	 */
   1382	bit = nfc_geo->block_mark_bit_offset;
   1383	p   = payload + nfc_geo->block_mark_byte_offset;
   1384	a   = auxiliary;
   1385
   1386	/*
   1387	 * Get the byte from the data area that overlays the block mark. Since
   1388	 * the ECC engine applies its own view to the bits in the page, the
   1389	 * physical block mark won't (in general) appear on a byte boundary in
   1390	 * the data.
   1391	 */
   1392	from_data = (p[0] >> bit) | (p[1] << (8 - bit));
   1393
   1394	/* Get the byte from the OOB. */
   1395	from_oob = a[0];
   1396
   1397	/* Swap them. */
   1398	a[0] = from_data;
   1399
   1400	mask = (0x1 << bit) - 1;
   1401	p[0] = (p[0] & mask) | (from_oob << bit);
   1402
   1403	mask = ~0 << bit;
   1404	p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
   1405}
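/*
 * Illustration of the swap above: with block_mark_bit_offset = 2, from_data
 * is assembled from bits 2..7 of p[0] and bits 0..1 of p[1]; the byte taken
 * from the OOB is then written back across the same bit positions, so the
 * operation is its own inverse.
 */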
   1406
   1407static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
   1408			       int last, int meta)
   1409{
   1410	struct gpmi_nand_data *this = nand_get_controller_data(chip);
   1411	struct bch_geometry *nfc_geo = &this->bch_geometry;
   1412	struct mtd_info *mtd = nand_to_mtd(chip);
   1413	int i;
   1414	unsigned char *status;
   1415	unsigned int max_bitflips = 0;
   1416
   1417	/* Loop over status bytes, accumulating ECC status. */
   1418	status = this->auxiliary_virt + ALIGN(meta, 4);
   1419
   1420	for (i = first; i < last; i++, status++) {
   1421		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
   1422			continue;
   1423
   1424		if (*status == STATUS_UNCORRECTABLE) {
   1425			int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
   1426			u8 *eccbuf = this->raw_buffer;
   1427			int offset, bitoffset;
   1428			int eccbytes;
   1429			int flips;
   1430
   1431			/* Read ECC bytes into our internal raw_buffer */
   1432			offset = nfc_geo->metadata_size * 8;
   1433			offset += ((8 * nfc_geo->eccn_chunk_size) + eccbits) * (i + 1);
   1434			offset -= eccbits;
   1435			bitoffset = offset % 8;
   1436			eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
   1437			offset /= 8;
   1438			eccbytes -= offset;
   1439			nand_change_read_column_op(chip, offset, eccbuf,
   1440						   eccbytes, false);
   1441
   1442			/*
   1443			 * ECC data are not byte aligned and we may have
   1444			 * in-band data in the first and last byte of
   1445			 * eccbuf. Set non-eccbits to one so that
   1446			 * nand_check_erased_ecc_chunk() does not count them
   1447			 * as bitflips.
   1448			 */
   1449			if (bitoffset)
   1450				eccbuf[0] |= GENMASK(bitoffset - 1, 0);
   1451
   1452			bitoffset = (bitoffset + eccbits) % 8;
   1453			if (bitoffset)
   1454				eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
   1455
   1456			/*
   1457			 * The ECC hardware reports an uncorrectable ECC status
   1458			 * code when there are bitflips in an erased page. As
   1459			 * nothing was written into this subpage, the ECC is
   1460			 * obviously wrong and we cannot trust it. We assume
   1461			 * at this point that we are reading an erased page and
   1462			 * try to correct the bitflips in the buffer, up to
   1463			 * ecc_strength bitflips. If this is a page with random
   1464			 * data, we exceed this number of bitflips and have an
   1465			 * ECC failure. Otherwise we use the corrected buffer.
   1466			 */
   1467			if (i == 0) {
   1468				/* The first block includes metadata */
   1469				flips = nand_check_erased_ecc_chunk(
   1470						buf + i * nfc_geo->eccn_chunk_size,
   1471						nfc_geo->eccn_chunk_size,
   1472						eccbuf, eccbytes,
   1473						this->auxiliary_virt,
   1474						nfc_geo->metadata_size,
   1475						nfc_geo->ecc_strength);
   1476			} else {
   1477				flips = nand_check_erased_ecc_chunk(
   1478						buf + i * nfc_geo->eccn_chunk_size,
   1479						nfc_geo->eccn_chunk_size,
   1480						eccbuf, eccbytes,
   1481						NULL, 0,
   1482						nfc_geo->ecc_strength);
   1483			}
   1484
   1485			if (flips > 0) {
   1486				max_bitflips = max_t(unsigned int, max_bitflips,
   1487						     flips);
   1488				mtd->ecc_stats.corrected += flips;
   1489				continue;
   1490			}
   1491
   1492			mtd->ecc_stats.failed++;
   1493			continue;
   1494		}
   1495
   1496		mtd->ecc_stats.corrected += *status;
   1497		max_bitflips = max_t(unsigned int, max_bitflips, *status);
   1498	}
   1499
   1500	return max_bitflips;
   1501}
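/*
 * Worked example of the ECC byte offset computation above (using the
 * hypothetical 512-byte-chunk geometry from earlier: metadata = 10,
 * gf_len = 13, ecc_strength = 8, so eccbits = 104): for chunk i = 1,
 *
 *     offset = 10 * 8 + (512 * 8 + 104) * 2 - 104 = 8376 bits
 *
 * i.e. byte 1047 with bitoffset 0, and eccbytes = 8480 / 8 - 1047 = 13,
 * which is exactly the 13 parity bytes of chunk 1.
 */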
   1502
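/*
 * Note on the layout programming below: the driver passes
 * geo->ecc_strength >> 1 to the ECC0/ECCN fields (the BCH ECC levels come in
 * steps of two bits of correction); this helper programs the standard
 * full-page layout, while gpmi_ecc_read_subpage() builds an ad-hoc layout
 * for partial reads.
 */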
   1503static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
   1504{
   1505	struct bch_geometry *geo = &this->bch_geometry;
   1506	unsigned int ecc_strength = geo->ecc_strength >> 1;
   1507	unsigned int gf_len = geo->gf_len;
   1508	unsigned int block0_size = geo->ecc0_chunk_size;
   1509	unsigned int blockn_size = geo->eccn_chunk_size;
   1510
   1511	this->bch_flashlayout0 =
   1512		BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
   1513		BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
   1514		BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
   1515		BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
   1516		BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block0_size, this);
   1517
   1518	this->bch_flashlayout1 =
   1519		BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
   1520		BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
   1521		BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
   1522		BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(blockn_size, this);
   1523}
   1524
   1525static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
   1526			      int oob_required, int page)
   1527{
   1528	struct gpmi_nand_data *this = nand_get_controller_data(chip);
   1529	struct mtd_info *mtd = nand_to_mtd(chip);
   1530	struct bch_geometry *geo = &this->bch_geometry;
   1531	unsigned int max_bitflips;
   1532	int ret;
   1533
   1534	gpmi_bch_layout_std(this);
   1535	this->bch = true;
   1536
   1537	ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
   1538	if (ret)
   1539		return ret;
   1540
   1541	max_bitflips = gpmi_count_bitflips(chip, buf, 0,
   1542					   geo->ecc_chunk_count,
   1543					   geo->auxiliary_status_offset);
   1544
   1545	/* handle the block mark swapping */
   1546	block_mark_swapping(this, buf, this->auxiliary_virt);
   1547
   1548	if (oob_required) {
   1549		/*
   1550		 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
   1551		 * for details about our policy for delivering the OOB.
   1552		 *
   1553		 * We fill the caller's buffer with set bits, and then copy the
   1554		 * block mark to the caller's buffer. Note that, if block mark
   1555		 * swapping was necessary, it has already been done, so we can
   1556		 * rely on the first byte of the auxiliary buffer to contain
   1557		 * the block mark.
   1558		 */
   1559		memset(chip->oob_poi, ~0, mtd->oobsize);
   1560		chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
   1561	}
   1562
   1563	return max_bitflips;
   1564}
   1565
   1566/* Fake a virtual small page for the subpage read */
   1567static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
   1568				 uint32_t len, uint8_t *buf, int page)
   1569{
   1570	struct gpmi_nand_data *this = nand_get_controller_data(chip);
   1571	struct bch_geometry *geo = &this->bch_geometry;
   1572	int size = chip->ecc.size; /* ECC chunk size */
   1573	int meta, n, page_size;
   1574	unsigned int max_bitflips;
   1575	unsigned int ecc_strength;
   1576	int first, last, marker_pos;
   1577	int ecc_parity_size;
   1578	int col = 0;
   1579	int ret;
   1580
   1581	/* The size of the ECC parity, in bytes */
   1582	ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
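       	/*
       	 * For example, with gf_len = 13 and ecc_strength = 8 (a common
       	 * setting for 2 KiB pages), each chunk carries 13 * 8 / 8 = 13
       	 * parity bytes.
       	 */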
   1583
   1584	/* Convert the byte range into first/last ECC chunk indices */
   1585	first = offs / size;
   1586	last = (offs + len - 1) / size;
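       	/*
       	 * e.g. with 512-byte chunks, offs = 1024 and len = 600 give
       	 * first = 2 and last = 3, i.e. the read spans chunks 2 and 3.
       	 */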
   1587
   1588	if (this->swap_block_mark) {
   1589		/*
   1590		 * Find the chunk which contains the Block Marker.
   1591		 * If this chunk is in the range [first, last],
   1592		 * we have to read out the whole page, because we have
   1593		 * swapped the data at the Block Marker position with the
   1594		 * metadata that is bound to chunk 0.
   1595		 */
   1596		marker_pos = geo->block_mark_byte_offset / size;
   1597		if (last >= marker_pos && first <= marker_pos) {
   1598			dev_dbg(this->dev,
   1599				"page:%d, first:%d, last:%d, marker at:%d\n",
   1600				page, first, last, marker_pos);
   1601			return gpmi_ecc_read_page(chip, buf, 0, page);
   1602		}
   1603	}
   1604
   1605	/*
   1606	 * If there is a dedicated ECC chunk for the metadata:
   1607	 * - an extra ECC size must be added when calculating col and page_size,
   1608	 *   if the meta size is NOT zero.
   1609	 * - ecc0_chunk_size needs to be set to the same size as the other chunks,
   1610	 *   if the meta size is zero.
   1611	 */
   1612
   1613	meta = geo->metadata_size;
   1614	if (first) {
   1615		if (geo->ecc_for_meta)
   1616			col = meta + ecc_parity_size
   1617				+ (size + ecc_parity_size) * first;
   1618		else
   1619			col = meta + (size + ecc_parity_size) * first;
   1620
   1621		meta = 0;
   1622		buf = buf + first * size;
   1623	}
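       	/*
       	 * For example, with meta = 10, size = 512, ecc_parity_size = 13 and
       	 * first = 2 (no dedicated metadata ECC), the read starts at column
       	 * col = 10 + (512 + 13) * 2 = 1060 of the raw page.
       	 */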
   1624
   1625	ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
   1626	n = last - first + 1;
   1627
   1628	if (geo->ecc_for_meta && meta)
   1629		page_size = meta + ecc_parity_size
   1630			    + (size + ecc_parity_size) * n;
   1631	else
   1632		page_size = meta + (size + ecc_parity_size) * n;
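       	/*
       	 * Continuing the example above (first = 2, last = 3, so n = 2 and
       	 * meta was cleared to 0 since first != 0), this subpage read
       	 * transfers page_size = 0 + (512 + 13) * 2 = 1050 bytes.
       	 */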
   1633
   1634	ecc_strength = geo->ecc_strength >> 1;
   1635
   1636	this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(
   1637		(geo->ecc_for_meta ? n : n - 1)) |
   1638		BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
   1639		BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
   1640		BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
   1641		BF_BCH_FLASH0LAYOUT0_DATA0_SIZE((geo->ecc_for_meta ?
   1642		0 : geo->ecc0_chunk_size), this);
   1643
   1644	this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
   1645		BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
   1646		BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
   1647		BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->eccn_chunk_size, this);
   1648
   1649	this->bch = true;
   1650
   1651	ret = nand_read_page_op(chip, page, col, buf, page_size);
   1652	if (ret)
   1653		return ret;
   1654
   1655	dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
   1656		page, offs, len, col, first, n, page_size);
   1657
   1658	max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);
   1659
   1660	return max_bitflips;
   1661}
   1662
   1663static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
   1664			       int oob_required, int page)
   1665{
   1666	struct mtd_info *mtd = nand_to_mtd(chip);
   1667	struct gpmi_nand_data *this = nand_get_controller_data(chip);
   1668	struct bch_geometry *nfc_geo = &this->bch_geometry;
   1669
   1670	dev_dbg(this->dev, "ecc write page.\n");
   1671
   1672	gpmi_bch_layout_std(this);
   1673	this->bch = true;
   1674
   1675	memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);
   1676
   1677	if (this->swap_block_mark) {
   1678		/*
   1679		 * When doing bad block marker swapping we must always copy the
   1680		 * input buffer as we can't modify the const buffer.
   1681		 */
   1682		memcpy(this->data_buffer_dma, buf, mtd->writesize);
   1683		buf = this->data_buffer_dma;
   1684		block_mark_swapping(this, this->data_buffer_dma,
   1685				    this->auxiliary_virt);
   1686	}
   1687
   1688	return nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
   1689}
   1690
   1691/*
   1692 * There are several places in this driver where we have to handle the OOB and
   1693 * block marks. This is the function where things are the most complicated, so
   1694 * this is where we try to explain it all. All the other places refer back to
   1695 * here.
   1696 *
   1697 * These are the rules, in order of decreasing importance:
   1698 *
   1699 * 1) Nothing the caller does can be allowed to imperil the block mark.
   1700 *
   1701 * 2) In read operations, the first byte of the OOB we return must reflect the
   1702 *    true state of the block mark, no matter where that block mark appears in
   1703 *    the physical page.
   1704 *
   1705 * 3) ECC-based read operations return an OOB full of set bits (since we never
   1706 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
   1707 *    return).
   1708 *
   1709 * 4) "Raw" read operations return a direct view of the physical bytes in the
   1710 *    page, using the conventional definition of which bytes are data and which
   1711 *    are OOB. This gives the caller a way to see the actual, physical bytes
   1712 *    in the page, without the distortions applied by our ECC engine.
   1713 *
   1714 *
   1715 * What we do for this specific read operation depends on two questions:
   1716 *
   1717 * 1) Are we doing a "raw" read, or an ECC-based read?
   1718 *
   1719 * 2) Are we using block mark swapping or transcription?
   1720 *
   1721 * There are four cases, illustrated by the following Karnaugh map:
   1722 *
   1723 *                    |           Raw           |         ECC-based       |
   1724 *       -------------+-------------------------+-------------------------+
   1725 *                    | Read the conventional   |                         |
   1726 *                    | OOB at the end of the   |                         |
   1727 *       Swapping     | page and return it. It  |                         |
   1728 *                    | contains exactly what   |                         |
   1729 *                    | we want.                | Read the block mark and |
   1730 *       -------------+-------------------------+ return it in a buffer   |
   1731 *                    | Read the conventional   | full of set bits.       |
   1732 *                    | OOB at the end of the   |                         |
   1733 *                    | page and also the block |                         |
   1734 *       Transcribing | mark in the metadata.   |                         |
   1735 *                    | Copy the block mark     |                         |
   1736 *                    | into the first byte of  |                         |
   1737 *                    | the OOB.                |                         |
   1738 *       -------------+-------------------------+-------------------------+
   1739 *
   1740 * Note that we break rule #4 in the Transcribing/Raw case because we're not
   1741 * giving an accurate view of the actual, physical bytes in the page (we're
   1742 * overwriting the block mark). That's OK because it's more important to follow
   1743 * rule #2.
   1744 *
   1745 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
   1746 * easy. When reading a page, for example, the NAND Flash MTD code calls our
   1747 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
   1748 * ECC-based or raw view of the page is implicit in which function it calls
   1749 * (there is a similar pair of ECC-based/raw functions for writing).
   1750 */
   1751static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
   1752{
   1753	struct mtd_info *mtd = nand_to_mtd(chip);
   1754	struct gpmi_nand_data *this = nand_get_controller_data(chip);
   1755	int ret;
   1756
   1757	/* clear the OOB buffer */
   1758	memset(chip->oob_poi, ~0, mtd->oobsize);
   1759
   1760	/* Read out the conventional OOB. */
   1761	ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
   1762				mtd->oobsize);
   1763	if (ret)
   1764		return ret;
   1765
   1766	/*
   1767	 * Now, we want to make sure the block mark is correct. In the
   1768	 * non-transcribing case (!GPMI_IS_MX23()), we already have it.
   1769	 * Otherwise, we need to explicitly read it.
   1770	 */
   1771	if (GPMI_IS_MX23(this)) {
   1772		/* Read the block mark into the first byte of the OOB buffer. */
   1773		ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
   1774		if (ret)
   1775			return ret;
   1776	}
   1777
   1778	return 0;
   1779}
   1780
   1781static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
   1782{
   1783	struct mtd_info *mtd = nand_to_mtd(chip);
   1784	struct mtd_oob_region of = { };
   1785
   1786	/* Is there any free OOB area available? */
   1787	mtd_ooblayout_free(mtd, 0, &of);
   1788	if (!of.length)
   1789		return -EPERM;
   1790
   1791	if (!nand_is_slc(chip))
   1792		return -EPERM;
   1793
   1794	return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
   1795				 chip->oob_poi + of.offset, of.length);
   1796}
   1797
   1798/*
   1799 * This function reads a NAND page without involving the ECC engine (no HW
   1800 * ECC correction).
   1801 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
   1802 * inline (interleaved with payload data) and does not align data chunks
   1803 * on byte boundaries. We thus need to take care when moving the payload
   1804 * data and ECC bits stored in the page into the provided buffers, which
   1805 * is why we're using nand_extract_bits().
   1806 *
   1807 * See the set_geometry_by_ecc_info() inline comments for a full
   1808 * description of the layout used by the GPMI controller.
   1809 */
   1810static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
   1811				  int oob_required, int page)
   1812{
   1813	struct mtd_info *mtd = nand_to_mtd(chip);
   1814	struct gpmi_nand_data *this = nand_get_controller_data(chip);
   1815	struct bch_geometry *nfc_geo = &this->bch_geometry;
   1816	int eccsize = nfc_geo->eccn_chunk_size;
   1817	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
   1818	u8 *tmp_buf = this->raw_buffer;
   1819	size_t src_bit_off;
   1820	size_t oob_bit_off;
   1821	size_t oob_byte_off;
   1822	uint8_t *oob = chip->oob_poi;
   1823	int step;
   1824	int ret;
   1825
   1826	ret = nand_read_page_op(chip, page, 0, tmp_buf,
   1827				mtd->writesize + mtd->oobsize);
   1828	if (ret)
   1829		return ret;
   1830
   1831	/*
   1832	 * If required, swap the bad block marker and the data stored in the
   1833	 * metadata section, so that we don't wrongly consider a block as bad.
   1834	 *
   1835	 * See the layout description for a detailed explanation on why this
   1836	 * is needed.
   1837	 */
   1838	if (this->swap_block_mark)
   1839		swap(tmp_buf[0], tmp_buf[mtd->writesize]);
   1840
   1841	/*
   1842	 * Copy the metadata section into the oob buffer (this section is
   1843	 * guaranteed to be aligned on a byte boundary).
   1844	 */
   1845	if (oob_required)
   1846		memcpy(oob, tmp_buf, nfc_geo->metadata_size);
   1847
   1848	oob_bit_off = nfc_geo->metadata_size * 8;
   1849	src_bit_off = oob_bit_off;
   1850
   1851	/* Extract interleaved payload data and ECC bits */
   1852	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
   1853		if (buf)
   1854			nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
   1855					  src_bit_off, eccsize * 8);
   1856		src_bit_off += eccsize * 8;
   1857
   1858		/* Pad the last ECC block so it ends on a byte boundary */
   1859		if (step == nfc_geo->ecc_chunk_count - 1 &&
   1860		    (oob_bit_off + eccbits) % 8)
   1861			eccbits += 8 - ((oob_bit_off + eccbits) % 8);
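       		/*
       		 * e.g. if (oob_bit_off + eccbits) % 8 == 6, two extra bits are
       		 * consumed so the last block ends on a byte boundary.
       		 */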
   1862
   1863		if (oob_required)
   1864			nand_extract_bits(oob, oob_bit_off, tmp_buf,
   1865					  src_bit_off, eccbits);
   1866
   1867		src_bit_off += eccbits;
   1868		oob_bit_off += eccbits;
   1869	}
   1870
   1871	if (oob_required) {
   1872		oob_byte_off = oob_bit_off / 8;
   1873
   1874		if (oob_byte_off < mtd->oobsize)
   1875			memcpy(oob + oob_byte_off,
   1876			       tmp_buf + mtd->writesize + oob_byte_off,
   1877			       mtd->oobsize - oob_byte_off);
   1878	}
   1879
   1880	return 0;
   1881}
   1882
   1883/*
   1884 * This function writes a NAND page without involving the ECC engine (no HW
   1885 * ECC generation).
   1886 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
   1887 * inline (interleaved with payload data) and does not align data chunks
   1888 * on byte boundaries. We thus need to take care to place the OOB area at
   1889 * the right spot in the final page, which is why we're using
   1890 * nand_extract_bits().
   1891 *
   1892 * See the set_geometry_by_ecc_info() inline comments for a full
   1893 * description of the layout used by the GPMI controller.
   1894 */
   1895static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
   1896				   int oob_required, int page)
   1897{
   1898	struct mtd_info *mtd = nand_to_mtd(chip);
   1899	struct gpmi_nand_data *this = nand_get_controller_data(chip);
   1900	struct bch_geometry *nfc_geo = &this->bch_geometry;
   1901	int eccsize = nfc_geo->eccn_chunk_size;
   1902	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
   1903	u8 *tmp_buf = this->raw_buffer;
   1904	uint8_t *oob = chip->oob_poi;
   1905	size_t dst_bit_off;
   1906	size_t oob_bit_off;
   1907	size_t oob_byte_off;
   1908	int step;
   1909
   1910	/*
   1911	 * Initialize all bits to 1 in case we don't have a buffer for the
   1912	 * payload or OOB data, so that unspecified bits are left in their
   1913	 * erased (all-ones) state.
   1914	 */
   1915	if (!buf || !oob_required)
   1916		memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
   1917
   1918	/*
   1919	 * First copy the metadata section (stored in oob buffer) at the
   1920	 * beginning of the page, as imposed by the GPMI layout.
   1921	 */
   1922	memcpy(tmp_buf, oob, nfc_geo->metadata_size);
   1923	oob_bit_off = nfc_geo->metadata_size * 8;
   1924	dst_bit_off = oob_bit_off;
   1925
   1926	/* Interleave payload data and ECC bits */
   1927	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
   1928		if (buf)
   1929			nand_extract_bits(tmp_buf, dst_bit_off, buf,
   1930					  step * eccsize * 8, eccsize * 8);
   1931		dst_bit_off += eccsize * 8;
   1932
   1933		/* Pad the last ECC block so it ends on a byte boundary */
   1934		if (step == nfc_geo->ecc_chunk_count - 1 &&
   1935		    (oob_bit_off + eccbits) % 8)
   1936			eccbits += 8 - ((oob_bit_off + eccbits) % 8);
   1937
   1938		if (oob_required)
   1939			nand_extract_bits(tmp_buf, dst_bit_off, oob,
   1940					  oob_bit_off, eccbits);
   1941
   1942		dst_bit_off += eccbits;
   1943		oob_bit_off += eccbits;
   1944	}
   1945
   1946	oob_byte_off = oob_bit_off / 8;
   1947
   1948	if (oob_required && oob_byte_off < mtd->oobsize)
   1949		memcpy(tmp_buf + mtd->writesize + oob_byte_off,
   1950		       oob + oob_byte_off, mtd->oobsize - oob_byte_off);
   1951
   1952	/*
   1953	 * If required, swap the bad block marker and the first byte of the
   1954	 * metadata section, so that we don't modify the bad block marker.
   1955	 *
   1956	 * See the layout description for a detailed explanation on why this
   1957	 * is needed.
   1958	 */
   1959	if (this->swap_block_mark)
   1960		swap(tmp_buf[0], tmp_buf[mtd->writesize]);
   1961
   1962	return nand_prog_page_op(chip, page, 0, tmp_buf,
   1963				 mtd->writesize + mtd->oobsize);
   1964}
   1965
   1966static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
   1967{
   1968	return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
   1969}
   1970
   1971static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
   1972{
   1973	return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
   1974}
   1975
   1976static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
   1977{
   1978	struct mtd_info *mtd = nand_to_mtd(chip);
   1979	struct gpmi_nand_data *this = nand_get_controller_data(chip);
   1980	int ret = 0;
   1981	uint8_t *block_mark;
   1982	int column, page, chipnr;
   1983
   1984	chipnr = (int)(ofs >> chip->chip_shift);
   1985	nand_select_target(chip, chipnr);
   1986
   1987	column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
   1988
   1989	/* Write the block mark. */
   1990	block_mark = this->data_buffer_dma;
   1991	block_mark[0] = 0; /* bad block marker */
   1992
   1993	/* Shift to get page */
   1994	page = (int)(ofs >> chip->page_shift);
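       	/*
       	 * e.g. with 2 KiB pages (page_shift = 11), ofs = 0x40000 maps to
       	 * page 128.
       	 */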
   1995
   1996	ret = nand_prog_page_op(chip, page, column, block_mark, 1);
   1997
   1998	nand_deselect_target(chip);
   1999
   2000	return ret;
   2001}
   2002
   2003static int nand_boot_set_geometry(struct gpmi_nand_data *this)
   2004{
   2005	struct boot_rom_geometry *geometry = &this->rom_geometry;
   2006
   2007	/*
   2008	 * Set the boot block stride size.
   2009	 *
   2010	 * In principle, we should be reading this from the OTP bits, since
   2011	 * that's where the ROM is going to get it. In fact, we don't have any
   2012	 * way to read the OTP bits, so we go with the default and hope for the
   2013	 * best.
   2014	 */
   2015	geometry->stride_size_in_pages = 64;
   2016
   2017	/*
   2018	 * Set the search area stride exponent.
   2019	 *
   2020	 * In principle, we should be reading this from the OTP bits, since
   2021	 * that's where the ROM is going to get it. In fact, we don't have any
   2022	 * way to read the OTP bits, so we go with the default and hope for the
   2023	 * best.
   2024	 */
   2025	geometry->search_area_stride_exponent = 2;
   2026	return 0;
   2027}
   2028
   2029static const char  *fingerprint = "STMP";
   2030static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
   2031{
   2032	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
   2033	struct device *dev = this->dev;
   2034	struct nand_chip *chip = &this->nand;
   2035	unsigned int search_area_size_in_strides;
   2036	unsigned int stride;
   2037	unsigned int page;
   2038	u8 *buffer = nand_get_data_buf(chip);
   2039	int found_an_ncb_fingerprint = false;
   2040	int ret;
   2041
   2042	/* Compute the number of strides in a search area. */
   2043	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
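       	/*
       	 * With the default search_area_stride_exponent of 2 set in
       	 * nand_boot_set_geometry(), this is 4 strides of 64 pages each,
       	 * i.e. the first 256 pages of the chip are scanned.
       	 */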
   2044
   2045	nand_select_target(chip, 0);
   2046
   2047	/*
   2048	 * Loop through the first search area, looking for the NCB fingerprint.
   2049	 */
   2050	dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
   2051
   2052	for (stride = 0; stride < search_area_size_in_strides; stride++) {
   2053		/* Compute the page addresses. */
   2054		page = stride * rom_geo->stride_size_in_pages;
   2055
   2056		dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
   2057
   2058		/*
   2059		 * Read the NCB fingerprint. The fingerprint is four bytes long
   2060		 * and starts at byte offset 12 of the page.
   2061		 */
   2062		ret = nand_read_page_op(chip, page, 12, buffer,
   2063					strlen(fingerprint));
   2064		if (ret)
   2065			continue;
   2066
   2067		/* Look for the fingerprint. */
   2068		if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
   2069			found_an_ncb_fingerprint = true;
   2070			break;
   2071		}
   2072
   2073	}
   2074
   2075	nand_deselect_target(chip);
   2076
   2077	if (found_an_ncb_fingerprint)
   2078		dev_dbg(dev, "\tFound a fingerprint\n");
   2079	else
   2080		dev_dbg(dev, "\tNo fingerprint found\n");
   2081	return found_an_ncb_fingerprint;
   2082}
   2083
   2084/* Writes a transcription stamp. */
   2085static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
   2086{
   2087	struct device *dev = this->dev;
   2088	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
   2089	struct nand_chip *chip = &this->nand;
   2090	struct mtd_info *mtd = nand_to_mtd(chip);
   2091	unsigned int block_size_in_pages;
   2092	unsigned int search_area_size_in_strides;
   2093	unsigned int search_area_size_in_pages;
   2094	unsigned int search_area_size_in_blocks;
   2095	unsigned int block;
   2096	unsigned int stride;
   2097	unsigned int page;
   2098	u8 *buffer = nand_get_data_buf(chip);
   2099	int status;
   2100
   2101	/* Compute the search area geometry. */
   2102	block_size_in_pages = mtd->erasesize / mtd->writesize;
   2103	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
   2104	search_area_size_in_pages = search_area_size_in_strides *
   2105					rom_geo->stride_size_in_pages;
   2106	search_area_size_in_blocks =
   2107		  (search_area_size_in_pages + (block_size_in_pages - 1)) /
   2108				    block_size_in_pages;
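       	/*
       	 * e.g. with 2 KiB pages and 128 KiB blocks (64 pages per block),
       	 * the default 256-page search area spans 256 / 64 = 4 blocks.
       	 */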
   2109
   2110	dev_dbg(dev, "Search Area Geometry :\n");
   2111	dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
   2112	dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
   2113	dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);
   2114
   2115	nand_select_target(chip, 0);
   2116
   2117	/* Loop over blocks in the first search area, erasing them. */
   2118	dev_dbg(dev, "Erasing the search area...\n");
   2119
   2120	for (block = 0; block < search_area_size_in_blocks; block++) {
   2121		/* Erase this block. */
   2122		dev_dbg(dev, "\tErasing block 0x%x\n", block);
   2123		status = nand_erase_op(chip, block);
   2124		if (status)
   2125			dev_err(dev, "[%s] Erase failed.\n", __func__);
   2126	}
   2127
   2128	/* Write the NCB fingerprint into the page buffer. */
   2129	memset(buffer, ~0, mtd->writesize);
   2130	memcpy(buffer + 12, fingerprint, strlen(fingerprint));
   2131
   2132	/* Loop through the first search area, writing NCB fingerprints. */
   2133	dev_dbg(dev, "Writing NCB fingerprints...\n");
   2134	for (stride = 0; stride < search_area_size_in_strides; stride++) {
   2135		/* Compute the page addresses. */
   2136		page = stride * rom_geo->stride_size_in_pages;
   2137
   2138		/* Write the first page of the current stride. */
   2139		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
   2140
   2141		status = chip->ecc.write_page_raw(chip, buffer, 0, page);
   2142		if (status)
   2143			dev_err(dev, "[%s] Write failed.\n", __func__);
   2144	}
   2145
   2146	nand_deselect_target(chip);
   2147
   2148	return 0;
   2149}
   2150
   2151static int mx23_boot_init(struct gpmi_nand_data  *this)
   2152{
   2153	struct device *dev = this->dev;
   2154	struct nand_chip *chip = &this->nand;
   2155	struct mtd_info *mtd = nand_to_mtd(chip);
   2156	unsigned int block_count;
   2157	unsigned int block;
   2158	int     chipnr;
   2159	int     page;
   2160	loff_t  byte;
   2161	uint8_t block_mark;
   2162	int     ret = 0;
   2163
   2164	/*
   2165	 * If control arrives here, we can't use block mark swapping, which
   2166	 * means we're forced to use transcription. First, scan for the
   2167	 * transcription stamp. If we find it, then we don't have to do
   2168	 * anything -- the block marks are already transcribed.
   2169	 */
   2170	if (mx23_check_transcription_stamp(this))
   2171		return 0;
   2172
   2173	/*
   2174	 * If control arrives here, we couldn't find a transcription stamp, so
   2175	 * we presume the block marks are in the conventional location.
   2176	 */
   2177	dev_dbg(dev, "Transcribing bad block marks...\n");
   2178
   2179	/* Compute the number of blocks in the entire medium. */
   2180	block_count = nanddev_eraseblocks_per_target(&chip->base);
   2181
   2182	/*
   2183	 * Loop over all the blocks in the medium, transcribing block marks as
   2184	 * we go.
   2185	 */
   2186	for (block = 0; block < block_count; block++) {
   2187		/*
   2188		 * Compute the chip, page and byte addresses for this block's
   2189		 * conventional mark.
   2190		 */
   2191		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
   2192		page = block << (chip->phys_erase_shift - chip->page_shift);
   2193		byte = block <<  chip->phys_erase_shift;
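       		/*
       		 * e.g. with 128 KiB blocks and 2 KiB pages (phys_erase_shift =
       		 * 17, page_shift = 11), block 5 lives at page 320 and byte
       		 * offset 0xa0000.
       		 */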
   2194
   2195		/* Send the command to read the conventional block mark. */
   2196		nand_select_target(chip, chipnr);
   2197		ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
   2198					1);
   2199		nand_deselect_target(chip);
   2200
   2201		if (ret)
   2202			continue;
   2203
   2204		/*
   2205		 * Check if the block is marked bad. If so, we need to mark it
   2206		 * again, but this time the result will be a mark in the
   2207		 * location where we transcribe block marks.
   2208		 */
   2209		if (block_mark != 0xff) {
   2210			dev_dbg(dev, "Transcribing mark in block %u\n", block);
   2211			ret = chip->legacy.block_markbad(chip, byte);
   2212			if (ret)
   2213				dev_err(dev,
   2214					"Failed to mark block bad with ret %d\n",
   2215					ret);
   2216		}
   2217	}
   2218
   2219	/* Write the stamp that indicates we've transcribed the block marks. */
   2220	mx23_write_transcription_stamp(this);
   2221	return 0;
   2222}
   2223
   2224static int nand_boot_init(struct gpmi_nand_data  *this)
   2225{
   2226	nand_boot_set_geometry(this);
   2227
   2228	/* This is ROM arch-specific initialization before the BBT scanning. */
   2229	if (GPMI_IS_MX23(this))
   2230		return mx23_boot_init(this);
   2231	return 0;
   2232}
   2233
   2234static int gpmi_set_geometry(struct gpmi_nand_data *this)
   2235{
   2236	int ret;
   2237
   2238	/* Free the temporary DMA memory for reading ID. */
   2239	gpmi_free_dma_buffer(this);
   2240
   2241	/* Set up the NFC geometry which is used by BCH. */
   2242	ret = bch_set_geometry(this);
   2243	if (ret) {
   2244		dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
   2245		return ret;
   2246	}
   2247
   2248	/* Alloc the new DMA buffers according to the pagesize and oobsize */
   2249	return gpmi_alloc_dma_buffer(this);
   2250}
   2251
   2252static int gpmi_init_last(struct gpmi_nand_data *this)
   2253{
   2254	struct nand_chip *chip = &this->nand;
   2255	struct mtd_info *mtd = nand_to_mtd(chip);
   2256	struct nand_ecc_ctrl *ecc = &chip->ecc;
   2257	struct bch_geometry *bch_geo = &this->bch_geometry;
   2258	int ret;
   2259
   2260	/* Set up the medium geometry */
   2261	ret = gpmi_set_geometry(this);
   2262	if (ret)
   2263		return ret;
   2264
   2265	/* Init the nand_ecc_ctrl{} */
   2266	ecc->read_page	= gpmi_ecc_read_page;
   2267	ecc->write_page	= gpmi_ecc_write_page;
   2268	ecc->read_oob	= gpmi_ecc_read_oob;
   2269	ecc->write_oob	= gpmi_ecc_write_oob;
   2270	ecc->read_page_raw = gpmi_ecc_read_page_raw;
   2271	ecc->write_page_raw = gpmi_ecc_write_page_raw;
   2272	ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
   2273	ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
   2274	ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
   2275	ecc->size	= bch_geo->eccn_chunk_size;
   2276	ecc->strength	= bch_geo->ecc_strength;
   2277	mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
   2278
   2279	/*
   2280	 * We only enable the subpage read when:
   2281	 *  (1) the chip is imx6, and
   2282	 *  (2) the size of the ECC parity is byte aligned.
   2283	 */
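       	/*
       	 * e.g. gf_len = 13 with ecc_strength = 8 gives 104 parity bits
       	 * (13 bytes), so subpage reads are enabled on i.MX6, whereas a
       	 * strength of 14 gives 182 bits and keeps them disabled.
       	 */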
   2284	if (GPMI_IS_MX6(this) &&
   2285		((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
   2286		ecc->read_subpage = gpmi_ecc_read_subpage;
   2287		chip->options |= NAND_SUBPAGE_READ;
   2288	}
   2289
   2290	return 0;
   2291}
   2292
   2293static int gpmi_nand_attach_chip(struct nand_chip *chip)
   2294{
   2295	struct gpmi_nand_data *this = nand_get_controller_data(chip);
   2296	int ret;
   2297
   2298	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
   2299		chip->bbt_options |= NAND_BBT_NO_OOB;
   2300
   2301		if (of_property_read_bool(this->dev->of_node,
   2302					  "fsl,no-blockmark-swap"))
   2303			this->swap_block_mark = false;
   2304	}
   2305	dev_dbg(this->dev, "Blockmark swapping %sabled\n",
   2306		this->swap_block_mark ? "en" : "dis");
   2307
   2308	ret = gpmi_init_last(this);
   2309	if (ret)
   2310		return ret;
   2311
   2312	chip->options |= NAND_SKIP_BBTSCAN;
   2313
   2314	return 0;
   2315}
   2316
   2317static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this)
   2318{
   2319	struct gpmi_transfer *transfer = &this->transfers[this->ntransfers];
   2320
   2321	this->ntransfers++;
   2322
   2323	if (this->ntransfers == GPMI_MAX_TRANSFERS)
   2324		return NULL;
   2325
   2326	return transfer;
   2327}
   2328
   2329static struct dma_async_tx_descriptor *gpmi_chain_command(
   2330	struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
   2331{
   2332	struct dma_chan *channel = get_dma_chan(this);
   2333	struct dma_async_tx_descriptor *desc;
   2334	struct gpmi_transfer *transfer;
   2335	int chip = this->nand.cur_cs;
   2336	u32 pio[3];
   2337
   2338	/* [1] send out the PIO words */
   2339	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
   2340		| BM_GPMI_CTRL0_WORD_LENGTH
   2341		| BF_GPMI_CTRL0_CS(chip, this)
   2342		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
   2343		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
   2344		| BM_GPMI_CTRL0_ADDRESS_INCREMENT
   2345		| BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
   2346	pio[1] = 0;
   2347	pio[2] = 0;
   2348	desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
   2349				      DMA_TRANS_NONE, 0);
   2350	if (!desc)
   2351		return NULL;
   2352
   2353	transfer = get_next_transfer(this);
   2354	if (!transfer)
   2355		return NULL;
   2356
   2357	transfer->cmdbuf[0] = cmd;
   2358	if (naddr)
   2359		memcpy(&transfer->cmdbuf[1], addr, naddr);
   2360
   2361	sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
   2362	dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);
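       	/*
       	 * e.g. the start of a large-page read: opcode 0x00 plus five address
       	 * cycles is sent as a single 6-byte CLE/ALE burst (XFER_COUNT was
       	 * set to naddr + 1 above).
       	 */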
   2363
   2364	transfer->direction = DMA_TO_DEVICE;
   2365
   2366	desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
   2367				       MXS_DMA_CTRL_WAIT4END);
   2368	return desc;
   2369}
   2370
   2371static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
   2372	struct gpmi_nand_data *this)
   2373{
   2374	struct dma_chan *channel = get_dma_chan(this);
   2375	u32 pio[2];
   2376
   2377	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
   2378		| BM_GPMI_CTRL0_WORD_LENGTH
   2379		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
   2380		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
   2381		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
   2382		| BF_GPMI_CTRL0_XFER_COUNT(0);
   2383	pio[1] = 0;
   2384
   2385	return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
   2386				MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
   2387}
   2388
   2389static struct dma_async_tx_descriptor *gpmi_chain_data_read(
   2390	struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
   2391{
   2392	struct dma_async_tx_descriptor *desc;
   2393	struct dma_chan *channel = get_dma_chan(this);
   2394	struct gpmi_transfer *transfer;
   2395	u32 pio[6] = {};
   2396
   2397	transfer = get_next_transfer(this);
   2398	if (!transfer)
   2399		return NULL;
   2400
   2401	transfer->direction = DMA_FROM_DEVICE;
   2402
   2403	*direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
   2404				   DMA_FROM_DEVICE);
   2405
   2406	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
   2407		| BM_GPMI_CTRL0_WORD_LENGTH
   2408		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
   2409		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
   2410		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
   2411		| BF_GPMI_CTRL0_XFER_COUNT(raw_len);
   2412
   2413	if (this->bch) {
   2414		pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
   2415			| BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
   2416			| BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
   2417				| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
   2418		pio[3] = raw_len;
   2419		pio[4] = transfer->sgl.dma_address;
   2420		pio[5] = this->auxiliary_phys;
   2421	}
   2422
   2423	desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
   2424				      DMA_TRANS_NONE, 0);
   2425	if (!desc)
   2426		return NULL;
   2427
   2428	if (!this->bch)
   2429		desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
   2430					     DMA_DEV_TO_MEM,
   2431					     MXS_DMA_CTRL_WAIT4END);
   2432
   2433	return desc;
   2434}
   2435
   2436static struct dma_async_tx_descriptor *gpmi_chain_data_write(
   2437	struct gpmi_nand_data *this, const void *buf, int raw_len)
   2438{
   2439	struct dma_chan *channel = get_dma_chan(this);
   2440	struct dma_async_tx_descriptor *desc;
   2441	struct gpmi_transfer *transfer;
   2442	u32 pio[6] = {};
   2443
   2444	transfer = get_next_transfer(this);
   2445	if (!transfer)
   2446		return NULL;
   2447
   2448	transfer->direction = DMA_TO_DEVICE;
   2449
   2450	prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);
   2451
   2452	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
   2453		| BM_GPMI_CTRL0_WORD_LENGTH
   2454		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
   2455		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
   2456		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
   2457		| BF_GPMI_CTRL0_XFER_COUNT(raw_len);
   2458
   2459	if (this->bch) {
   2460		pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
   2461			| BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
   2462			| BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
   2463					BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
   2464		pio[3] = raw_len;
   2465		pio[4] = transfer->sgl.dma_address;
   2466		pio[5] = this->auxiliary_phys;
   2467	}
   2468
   2469	desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
   2470				      DMA_TRANS_NONE,
   2471				      (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
   2472	if (!desc)
   2473		return NULL;
   2474
   2475	if (!this->bch)
   2476		desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
   2477					       DMA_MEM_TO_DEV,
   2478					       MXS_DMA_CTRL_WAIT4END);
   2479
   2480	return desc;
   2481}
   2482
   2483static int gpmi_nfc_exec_op(struct nand_chip *chip,
   2484			     const struct nand_operation *op,
   2485			     bool check_only)
   2486{
   2487	const struct nand_op_instr *instr;
   2488	struct gpmi_nand_data *this = nand_get_controller_data(chip);
   2489	struct dma_async_tx_descriptor *desc = NULL;
   2490	int i, ret, buf_len = 0, nbufs = 0;
   2491	u8 cmd = 0;
   2492	void *buf_read = NULL;
   2493	const void *buf_write = NULL;
   2494	bool direct = false;
   2495	struct completion *dma_completion, *bch_completion;
   2496	unsigned long to;
   2497
   2498	if (check_only)
   2499		return 0;
   2500
   2501	this->ntransfers = 0;
   2502	for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
   2503		this->transfers[i].direction = DMA_NONE;
   2504
   2505	ret = pm_runtime_get_sync(this->dev);
   2506	if (ret < 0) {
   2507		pm_runtime_put_noidle(this->dev);
   2508		return ret;
   2509	}
   2510
   2511	/*
   2512	 * This driver currently supports only one NAND chip. Plus, dies share
   2513	 * the same configuration. So once timings have been applied on the
   2514	 * controller side, they will not change anymore. When the time comes,
   2515	 * the check on must_apply_timings will have to be dropped.
   2516	 */
   2517	if (this->hw.must_apply_timings) {
   2518		this->hw.must_apply_timings = false;
   2519		ret = gpmi_nfc_apply_timings(this);
   2520		if (ret)
   2521			goto out_pm;
   2522	}
   2523
   2524	dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
   2525
   2526	for (i = 0; i < op->ninstrs; i++) {
   2527		instr = &op->instrs[i];
   2528
   2529		nand_op_trace("  ", instr);
   2530
   2531		switch (instr->type) {
   2532		case NAND_OP_WAITRDY_INSTR:
   2533			desc = gpmi_chain_wait_ready(this);
   2534			break;
   2535		case NAND_OP_CMD_INSTR:
   2536			cmd = instr->ctx.cmd.opcode;
   2537
   2538			/*
   2539			 * When this command has an address cycle, chain it
   2540			 * together with the address cycle.
   2541			 */
   2542			if (i + 1 != op->ninstrs &&
   2543			    op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
   2544				continue;
   2545
   2546			desc = gpmi_chain_command(this, cmd, NULL, 0);
   2547
   2548			break;
   2549		case NAND_OP_ADDR_INSTR:
   2550			desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
   2551						  instr->ctx.addr.naddrs);
   2552			break;
   2553		case NAND_OP_DATA_OUT_INSTR:
   2554			buf_write = instr->ctx.data.buf.out;
   2555			buf_len = instr->ctx.data.len;
   2556			nbufs++;
   2557
   2558			desc = gpmi_chain_data_write(this, buf_write, buf_len);
   2559
   2560			break;
   2561		case NAND_OP_DATA_IN_INSTR:
   2562			if (!instr->ctx.data.len)
   2563				break;
   2564			buf_read = instr->ctx.data.buf.in;
   2565			buf_len = instr->ctx.data.len;
   2566			nbufs++;
   2567
   2568			desc = gpmi_chain_data_read(this, buf_read, buf_len,
   2569						   &direct);
   2570			break;
   2571		}
   2572
   2573		if (!desc) {
   2574			ret = -ENXIO;
   2575			goto unmap;
   2576		}
   2577	}
   2578
   2579	dev_dbg(this->dev, "%s setup done\n", __func__);
   2580
   2581	if (nbufs > 1) {
   2582		dev_err(this->dev, "Multiple data instructions not supported\n");
   2583		ret = -EINVAL;
   2584		goto unmap;
   2585	}
   2586
   2587	if (this->bch) {
   2588		writel(this->bch_flashlayout0,
   2589		       this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
   2590		writel(this->bch_flashlayout1,
   2591		       this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
   2592	}
   2593
   2594	desc->callback = dma_irq_callback;
   2595	desc->callback_param = this;
   2596	dma_completion = &this->dma_done;
   2597	bch_completion = NULL;
   2598
   2599	init_completion(dma_completion);
   2600
   2601	if (this->bch && buf_read) {
   2602		writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
   2603		       this->resources.bch_regs + HW_BCH_CTRL_SET);
   2604		bch_completion = &this->bch_done;
   2605		init_completion(bch_completion);
   2606	}
   2607
   2608	dmaengine_submit(desc);
   2609	dma_async_issue_pending(get_dma_chan(this));
   2610
   2611	to = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
   2612	if (!to) {
   2613		dev_err(this->dev, "DMA timeout, last DMA\n");
   2614		gpmi_dump_info(this);
   2615		ret = -ETIMEDOUT;
   2616		goto unmap;
   2617	}
   2618
   2619	if (this->bch && buf_read) {
   2620		to = wait_for_completion_timeout(bch_completion, msecs_to_jiffies(1000));
   2621		if (!to) {
   2622			dev_err(this->dev, "BCH timeout, last DMA\n");
   2623			gpmi_dump_info(this);
   2624			ret = -ETIMEDOUT;
   2625			goto unmap;
   2626		}
   2627	}
   2628
   2629	writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
   2630	       this->resources.bch_regs + HW_BCH_CTRL_CLR);
   2631	gpmi_clear_bch(this);
   2632
   2633	ret = 0;
   2634
   2635unmap:
   2636	for (i = 0; i < this->ntransfers; i++) {
   2637		struct gpmi_transfer *transfer = &this->transfers[i];
   2638
   2639		if (transfer->direction != DMA_NONE)
   2640			dma_unmap_sg(this->dev, &transfer->sgl, 1,
   2641				     transfer->direction);
   2642	}
   2643
   2644	if (!ret && buf_read && !direct)
   2645		memcpy(buf_read, this->data_buffer_dma,
   2646		       gpmi_raw_len_to_len(this, buf_len));
   2647
   2648	this->bch = false;
   2649
   2650out_pm:
   2651	pm_runtime_mark_last_busy(this->dev);
   2652	pm_runtime_put_autosuspend(this->dev);
   2653
   2654	return ret;
   2655}
   2656
   2657static const struct nand_controller_ops gpmi_nand_controller_ops = {
   2658	.attach_chip = gpmi_nand_attach_chip,
   2659	.setup_interface = gpmi_setup_interface,
   2660	.exec_op = gpmi_nfc_exec_op,
   2661};
   2662
   2663static int gpmi_nand_init(struct gpmi_nand_data *this)
   2664{
   2665	struct nand_chip *chip = &this->nand;
   2666	struct mtd_info  *mtd = nand_to_mtd(chip);
   2667	int ret;
   2668
   2669	/* init the MTD data structures */
   2670	mtd->name		= "gpmi-nand";
   2671	mtd->dev.parent		= this->dev;
   2672
   2673	/* Init the nand_chip{}; we don't support a 16-bit NAND Flash bus. */
   2674	nand_set_controller_data(chip, this);
   2675	nand_set_flash_node(chip, this->pdev->dev.of_node);
   2676	chip->legacy.block_markbad = gpmi_block_markbad;
   2677	chip->badblock_pattern	= &gpmi_bbt_descr;
   2678	chip->options		|= NAND_NO_SUBPAGE_WRITE;
   2679
   2680	/* Set up swap_block_mark; it must be set before gpmi_set_geometry() */
   2681	this->swap_block_mark = !GPMI_IS_MX23(this);
   2682
   2683	/*
   2684	 * Allocate a temporary DMA buffer for reading ID in the
   2685	 * nand_scan_ident().
   2686	 */
   2687	this->bch_geometry.payload_size = 1024;
   2688	this->bch_geometry.auxiliary_size = 128;
   2689	ret = gpmi_alloc_dma_buffer(this);
   2690	if (ret)
   2691		return ret;
   2692
   2693	nand_controller_init(&this->base);
   2694	this->base.ops = &gpmi_nand_controller_ops;
   2695	chip->controller = &this->base;
   2696
   2697	ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
   2698	if (ret)
   2699		goto err_out;
   2700
   2701	ret = nand_boot_init(this);
   2702	if (ret)
   2703		goto err_nand_cleanup;
   2704	ret = nand_create_bbt(chip);
   2705	if (ret)
   2706		goto err_nand_cleanup;
   2707
   2708	ret = mtd_device_register(mtd, NULL, 0);
   2709	if (ret)
   2710		goto err_nand_cleanup;
   2711	return 0;
   2712
   2713err_nand_cleanup:
   2714	nand_cleanup(chip);
   2715err_out:
   2716	gpmi_free_dma_buffer(this);
   2717	return ret;
   2718}
   2719
   2720static const struct of_device_id gpmi_nand_id_table[] = {
   2721	{ .compatible = "fsl,imx23-gpmi-nand", .data = &gpmi_devdata_imx23, },
   2722	{ .compatible = "fsl,imx28-gpmi-nand", .data = &gpmi_devdata_imx28, },
   2723	{ .compatible = "fsl,imx6q-gpmi-nand", .data = &gpmi_devdata_imx6q, },
   2724	{ .compatible = "fsl,imx6sx-gpmi-nand", .data = &gpmi_devdata_imx6sx, },
   2725	{ .compatible = "fsl,imx7d-gpmi-nand", .data = &gpmi_devdata_imx7d,},
   2726	{}
   2727};
   2728MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
   2729
   2730static int gpmi_nand_probe(struct platform_device *pdev)
   2731{
   2732	struct gpmi_nand_data *this;
   2733	int ret;
   2734
   2735	this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
   2736	if (!this)
   2737		return -ENOMEM;
   2738
   2739	this->devdata = of_device_get_match_data(&pdev->dev);
   2740	platform_set_drvdata(pdev, this);
   2741	this->pdev  = pdev;
   2742	this->dev   = &pdev->dev;
   2743
   2744	ret = acquire_resources(this);
   2745	if (ret)
   2746		goto exit_acquire_resources;
   2747
   2748	ret = __gpmi_enable_clk(this, true);
   2749	if (ret)
   2750		goto exit_acquire_resources;
   2751
   2752	pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
   2753	pm_runtime_use_autosuspend(&pdev->dev);
   2754	pm_runtime_set_active(&pdev->dev);
   2755	pm_runtime_enable(&pdev->dev);
   2756	pm_runtime_get_sync(&pdev->dev);
   2757
   2758	ret = gpmi_init(this);
   2759	if (ret)
   2760		goto exit_nfc_init;
   2761
   2762	ret = gpmi_nand_init(this);
   2763	if (ret)
   2764		goto exit_nfc_init;
   2765
   2766	pm_runtime_mark_last_busy(&pdev->dev);
   2767	pm_runtime_put_autosuspend(&pdev->dev);
   2768
   2769	dev_info(this->dev, "driver registered.\n");
   2770
   2771	return 0;
   2772
   2773exit_nfc_init:
   2774	pm_runtime_put(&pdev->dev);
   2775	pm_runtime_disable(&pdev->dev);
   2776	release_resources(this);
   2777exit_acquire_resources:
   2778
   2779	return ret;
   2780}
   2781
   2782static int gpmi_nand_remove(struct platform_device *pdev)
   2783{
   2784	struct gpmi_nand_data *this = platform_get_drvdata(pdev);
   2785	struct nand_chip *chip = &this->nand;
   2786	int ret;
   2787
   2788	pm_runtime_put_sync(&pdev->dev);
   2789	pm_runtime_disable(&pdev->dev);
   2790
   2791	ret = mtd_device_unregister(nand_to_mtd(chip));
   2792	WARN_ON(ret);
   2793	nand_cleanup(chip);
   2794	gpmi_free_dma_buffer(this);
   2795	release_resources(this);
   2796	return 0;
   2797}
   2798
   2799#ifdef CONFIG_PM_SLEEP
   2800static int gpmi_pm_suspend(struct device *dev)
   2801{
   2802	struct gpmi_nand_data *this = dev_get_drvdata(dev);
   2803
   2804	release_dma_channels(this);
   2805	return 0;
   2806}
   2807
   2808static int gpmi_pm_resume(struct device *dev)
   2809{
   2810	struct gpmi_nand_data *this = dev_get_drvdata(dev);
   2811	int ret;
   2812
   2813	ret = acquire_dma_channels(this);
   2814	if (ret < 0)
   2815		return ret;
   2816
   2817	/* re-init the GPMI registers */
   2818	ret = gpmi_init(this);
   2819	if (ret) {
   2820		dev_err(this->dev, "Error setting GPMI : %d\n", ret);
   2821		return ret;
   2822	}
   2823
   2824	/* Set flag to get timing setup restored for next exec_op */
   2825	if (this->hw.clk_rate)
   2826		this->hw.must_apply_timings = true;
   2827
   2828	/* re-init the BCH registers */
   2829	ret = bch_set_geometry(this);
   2830	if (ret) {
   2831		dev_err(this->dev, "Error setting BCH : %d\n", ret);
   2832		return ret;
   2833	}
   2834
   2835	return 0;
   2836}
   2837#endif /* CONFIG_PM_SLEEP */
   2838
   2839static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
   2840{
   2841	struct gpmi_nand_data *this = dev_get_drvdata(dev);
   2842
   2843	return __gpmi_enable_clk(this, false);
   2844}
   2845
   2846static int __maybe_unused gpmi_runtime_resume(struct device *dev)
   2847{
   2848	struct gpmi_nand_data *this = dev_get_drvdata(dev);
   2849
   2850	return __gpmi_enable_clk(this, true);
   2851}
   2852
   2853static const struct dev_pm_ops gpmi_pm_ops = {
   2854	SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
   2855	SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
   2856};
   2857
   2858static struct platform_driver gpmi_nand_driver = {
   2859	.driver = {
   2860		.name = "gpmi-nand",
   2861		.pm = &gpmi_pm_ops,
   2862		.of_match_table = gpmi_nand_id_table,
   2863	},
   2864	.probe   = gpmi_nand_probe,
   2865	.remove  = gpmi_nand_remove,
   2866};
   2867module_platform_driver(gpmi_nand_driver);
   2868
   2869MODULE_AUTHOR("Freescale Semiconductor, Inc.");
   2870MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
   2871MODULE_LICENSE("GPL");