cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

core.c (33424B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (C) 2016-2017 Micron Technology, Inc.
      4 *
      5 * Authors:
      6 *	Peter Pan <peterpandong@micron.com>
      7 *	Boris Brezillon <boris.brezillon@bootlin.com>
      8 */
      9
     10#define pr_fmt(fmt)	"spi-nand: " fmt
     11
     12#include <linux/device.h>
     13#include <linux/jiffies.h>
     14#include <linux/kernel.h>
     15#include <linux/module.h>
     16#include <linux/mtd/spinand.h>
     17#include <linux/of.h>
     18#include <linux/slab.h>
     19#include <linux/string.h>
     20#include <linux/spi/spi.h>
     21#include <linux/spi/spi-mem.h>
     22
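        /*
         * Feature-register accessors: GET/SET FEATURE ops always go through the
         * DMA-safe scratch buffer rather than a caller-provided buffer.
         */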
     23static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
     24{
     25	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
     26						      spinand->scratchbuf);
     27	int ret;
     28
     29	ret = spi_mem_exec_op(spinand->spimem, &op);
     30	if (ret)
     31		return ret;
     32
     33	*val = *spinand->scratchbuf;
     34	return 0;
     35}
     36
     37static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
     38{
     39	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
     40						      spinand->scratchbuf);
     41
     42	*spinand->scratchbuf = val;
     43	return spi_mem_exec_op(spinand->spimem, &op);
     44}
     45
     46static int spinand_read_status(struct spinand_device *spinand, u8 *status)
     47{
     48	return spinand_read_reg_op(spinand, REG_STATUS, status);
     49}
     50
     51static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
     52{
     53	struct nand_device *nand = spinand_to_nand(spinand);
     54
     55	if (WARN_ON(spinand->cur_target < 0 ||
     56		    spinand->cur_target >= nand->memorg.ntargets))
     57		return -EINVAL;
     58
     59	*cfg = spinand->cfg_cache[spinand->cur_target];
     60	return 0;
     61}
     62
     63static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
     64{
     65	struct nand_device *nand = spinand_to_nand(spinand);
     66	int ret;
     67
     68	if (WARN_ON(spinand->cur_target < 0 ||
     69		    spinand->cur_target >= nand->memorg.ntargets))
     70		return -EINVAL;
     71
     72	if (spinand->cfg_cache[spinand->cur_target] == cfg)
     73		return 0;
     74
     75	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
     76	if (ret)
     77		return ret;
     78
     79	spinand->cfg_cache[spinand->cur_target] = cfg;
     80	return 0;
     81}
     82
     83/**
     84 * spinand_upd_cfg() - Update the configuration register
     85 * @spinand: the spinand device
     86 * @mask: the mask encoding the bits to update in the config reg
     87 * @val: the new value to apply
     88 *
     89 * Update the configuration register.
     90 *
     91 * Return: 0 on success, a negative error code otherwise.
     92 */
     93int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
     94{
     95	int ret;
     96	u8 cfg;
     97
     98	ret = spinand_get_cfg(spinand, &cfg);
     99	if (ret)
    100		return ret;
    101
    102	cfg &= ~mask;
    103	cfg |= val;
    104
    105	return spinand_set_cfg(spinand, cfg);
    106}
    107
    108/**
    109 * spinand_select_target() - Select a specific NAND target/die
    110 * @spinand: the spinand device
    111 * @target: the target/die to select
    112 *
     113 * Select a new target/die. If the chip only has one die, this function is a NOOP.
    114 *
    115 * Return: 0 on success, a negative error code otherwise.
    116 */
    117int spinand_select_target(struct spinand_device *spinand, unsigned int target)
    118{
    119	struct nand_device *nand = spinand_to_nand(spinand);
    120	int ret;
    121
    122	if (WARN_ON(target >= nand->memorg.ntargets))
    123		return -EINVAL;
    124
    125	if (spinand->cur_target == target)
    126		return 0;
    127
    128	if (nand->memorg.ntargets == 1) {
    129		spinand->cur_target = target;
    130		return 0;
    131	}
    132
    133	ret = spinand->select_target(spinand, target);
    134	if (ret)
    135		return ret;
    136
    137	spinand->cur_target = target;
    138	return 0;
    139}
    140
    141static int spinand_read_cfg(struct spinand_device *spinand)
    142{
    143	struct nand_device *nand = spinand_to_nand(spinand);
    144	unsigned int target;
    145	int ret;
    146
    147	for (target = 0; target < nand->memorg.ntargets; target++) {
    148		ret = spinand_select_target(spinand, target);
    149		if (ret)
    150			return ret;
    151
    152		/*
    153		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
    154		 * here to bypass the config cache.
    155		 */
    156		ret = spinand_read_reg_op(spinand, REG_CFG,
    157					  &spinand->cfg_cache[target]);
    158		if (ret)
    159			return ret;
    160	}
    161
    162	return 0;
    163}
    164
    165static int spinand_init_cfg_cache(struct spinand_device *spinand)
    166{
    167	struct nand_device *nand = spinand_to_nand(spinand);
    168	struct device *dev = &spinand->spimem->spi->dev;
    169
    170	spinand->cfg_cache = devm_kcalloc(dev,
    171					  nand->memorg.ntargets,
    172					  sizeof(*spinand->cfg_cache),
    173					  GFP_KERNEL);
    174	if (!spinand->cfg_cache)
    175		return -ENOMEM;
    176
    177	return 0;
    178}
    179
    180static int spinand_init_quad_enable(struct spinand_device *spinand)
    181{
    182	bool enable = false;
    183
    184	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
    185		return 0;
    186
    187	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
    188	    spinand->op_templates.write_cache->data.buswidth == 4 ||
    189	    spinand->op_templates.update_cache->data.buswidth == 4)
    190		enable = true;
    191
    192	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
    193			       enable ? CFG_QUAD_ENABLE : 0);
    194}
    195
    196static int spinand_ecc_enable(struct spinand_device *spinand,
    197			      bool enable)
    198{
    199	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
    200			       enable ? CFG_ECC_ENABLE : 0);
    201}
    202
    203static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
    204{
    205	struct nand_device *nand = spinand_to_nand(spinand);
    206
    207	if (spinand->eccinfo.get_status)
    208		return spinand->eccinfo.get_status(spinand, status);
    209
    210	switch (status & STATUS_ECC_MASK) {
    211	case STATUS_ECC_NO_BITFLIPS:
    212		return 0;
    213
    214	case STATUS_ECC_HAS_BITFLIPS:
    215		/*
    216		 * We have no way to know exactly how many bitflips have been
    217		 * fixed, so let's return the maximum possible value so that
    218		 * wear-leveling layers move the data immediately.
    219		 */
    220		return nanddev_get_ecc_conf(nand)->strength;
    221
    222	case STATUS_ECC_UNCOR_ERROR:
    223		return -EBADMSG;
    224
    225	default:
    226		break;
    227	}
    228
    229	return -EINVAL;
    230}
    231
    232static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
    233				       struct mtd_oob_region *region)
    234{
    235	return -ERANGE;
    236}
    237
    238static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
    239					struct mtd_oob_region *region)
    240{
    241	if (section)
    242		return -ERANGE;
    243
    244	/* Reserve 2 bytes for the BBM. */
    245	region->offset = 2;
    246	region->length = 62;
    247
    248	return 0;
    249}
    250
    251static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
    252	.ecc = spinand_noecc_ooblayout_ecc,
    253	.free = spinand_noecc_ooblayout_free,
    254};
    255
    256static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
    257{
    258	struct spinand_device *spinand = nand_to_spinand(nand);
    259	struct mtd_info *mtd = nanddev_to_mtd(nand);
    260	struct spinand_ondie_ecc_conf *engine_conf;
    261
    262	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
    263	nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
    264	nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;
    265
    266	engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
    267	if (!engine_conf)
    268		return -ENOMEM;
    269
    270	nand->ecc.ctx.priv = engine_conf;
    271
    272	if (spinand->eccinfo.ooblayout)
    273		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
    274	else
    275		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
    276
    277	return 0;
    278}
    279
    280static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
    281{
    282	kfree(nand->ecc.ctx.priv);
    283}
    284
    285static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
    286					    struct nand_page_io_req *req)
    287{
    288	struct spinand_device *spinand = nand_to_spinand(nand);
    289	bool enable = (req->mode != MTD_OPS_RAW);
    290
    291	memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
    292
    293	/* Only enable or disable the engine */
    294	return spinand_ecc_enable(spinand, enable);
    295}
    296
    297static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
    298					   struct nand_page_io_req *req)
    299{
    300	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
    301	struct spinand_device *spinand = nand_to_spinand(nand);
    302	struct mtd_info *mtd = spinand_to_mtd(spinand);
    303	int ret;
    304
    305	if (req->mode == MTD_OPS_RAW)
    306		return 0;
    307
    308	/* Nothing to do when finishing a page write */
    309	if (req->type == NAND_PAGE_WRITE)
    310		return 0;
    311
    312	/* Finish a page read: check the status, report errors/bitflips */
    313	ret = spinand_check_ecc_status(spinand, engine_conf->status);
    314	if (ret == -EBADMSG)
    315		mtd->ecc_stats.failed++;
    316	else if (ret > 0)
    317		mtd->ecc_stats.corrected += ret;
    318
    319	return ret;
    320}
    321
    322static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
    323	.init_ctx = spinand_ondie_ecc_init_ctx,
    324	.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
    325	.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
    326	.finish_io_req = spinand_ondie_ecc_finish_io_req,
    327};
    328
    329static struct nand_ecc_engine spinand_ondie_ecc_engine = {
    330	.ops = &spinand_ondie_ecc_engine_ops,
    331};
    332
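        /*
         * Stash the status register value sampled after a page read so that
         * spinand_ondie_ecc_finish_io_req() can turn it into a bitflip count
         * or -EBADMSG.
         */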
    333static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
    334{
    335	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
    336
    337	if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
    338	    engine_conf)
    339		engine_conf->status = status;
    340}
    341
    342static int spinand_write_enable_op(struct spinand_device *spinand)
    343{
    344	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
    345
    346	return spi_mem_exec_op(spinand->spimem, &op);
    347}
    348
    349static int spinand_load_page_op(struct spinand_device *spinand,
    350				const struct nand_page_io_req *req)
    351{
    352	struct nand_device *nand = spinand_to_nand(spinand);
    353	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
    354	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
    355
    356	return spi_mem_exec_op(spinand->spimem, &op);
    357}
    358
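        /*
         * Move the page cache content to the in-memory buffers through the
         * plane's dirmap descriptor (raw or ECC-wrapped depending on req->mode),
         * then copy the requested data/OOB portions to the caller's buffers.
         */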
    359static int spinand_read_from_cache_op(struct spinand_device *spinand,
    360				      const struct nand_page_io_req *req)
    361{
    362	struct nand_device *nand = spinand_to_nand(spinand);
    363	struct mtd_info *mtd = spinand_to_mtd(spinand);
    364	struct spi_mem_dirmap_desc *rdesc;
    365	unsigned int nbytes = 0;
    366	void *buf = NULL;
    367	u16 column = 0;
    368	ssize_t ret;
    369
    370	if (req->datalen) {
    371		buf = spinand->databuf;
    372		nbytes = nanddev_page_size(nand);
    373		column = 0;
    374	}
    375
    376	if (req->ooblen) {
    377		nbytes += nanddev_per_page_oobsize(nand);
    378		if (!buf) {
    379			buf = spinand->oobbuf;
    380			column = nanddev_page_size(nand);
    381		}
    382	}
    383
    384	if (req->mode == MTD_OPS_RAW)
    385		rdesc = spinand->dirmaps[req->pos.plane].rdesc;
    386	else
    387		rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;
    388
    389	while (nbytes) {
    390		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
    391		if (ret < 0)
    392			return ret;
    393
    394		if (!ret || ret > nbytes)
    395			return -EIO;
    396
    397		nbytes -= ret;
    398		column += ret;
    399		buf += ret;
    400	}
    401
    402	if (req->datalen)
    403		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
    404		       req->datalen);
    405
    406	if (req->ooblen) {
    407		if (req->mode == MTD_OPS_AUTO_OOB)
    408			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
    409						    spinand->oobbuf,
    410						    req->ooboffs,
    411						    req->ooblen);
    412		else
    413			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
    414			       req->ooblen);
    415	}
    416
    417	return 0;
    418}
    419
    420static int spinand_write_to_cache_op(struct spinand_device *spinand,
    421				     const struct nand_page_io_req *req)
    422{
    423	struct nand_device *nand = spinand_to_nand(spinand);
    424	struct mtd_info *mtd = spinand_to_mtd(spinand);
    425	struct spi_mem_dirmap_desc *wdesc;
    426	unsigned int nbytes, column = 0;
    427	void *buf = spinand->databuf;
    428	ssize_t ret;
    429
    430	/*
    431	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
    432	 * the cache content to 0xFF (depends on vendor implementation), so we
    433	 * must fill the page cache entirely even if we only want to program
     434	 * the data portion of the page; otherwise we might corrupt the BBM or
     435	 * user data previously programmed in the OOB area.
    436	 *
    437	 * Only reset the data buffer manually, the OOB buffer is prepared by
    438	 * ECC engines ->prepare_io_req() callback.
    439	 */
    440	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
    441	memset(spinand->databuf, 0xff, nanddev_page_size(nand));
    442
    443	if (req->datalen)
    444		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
    445		       req->datalen);
    446
    447	if (req->ooblen) {
    448		if (req->mode == MTD_OPS_AUTO_OOB)
    449			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
    450						    spinand->oobbuf,
    451						    req->ooboffs,
    452						    req->ooblen);
    453		else
    454			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
    455			       req->ooblen);
    456	}
    457
    458	if (req->mode == MTD_OPS_RAW)
    459		wdesc = spinand->dirmaps[req->pos.plane].wdesc;
    460	else
    461		wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;
    462
    463	while (nbytes) {
    464		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
    465		if (ret < 0)
    466			return ret;
    467
    468		if (!ret || ret > nbytes)
    469			return -EIO;
    470
    471		nbytes -= ret;
    472		column += ret;
    473		buf += ret;
    474	}
    475
    476	return 0;
    477}
    478
    479static int spinand_program_op(struct spinand_device *spinand,
    480			      const struct nand_page_io_req *req)
    481{
    482	struct nand_device *nand = spinand_to_nand(spinand);
    483	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
    484	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
    485
    486	return spi_mem_exec_op(spinand->spimem, &op);
    487}
    488
    489static int spinand_erase_op(struct spinand_device *spinand,
    490			    const struct nand_pos *pos)
    491{
    492	struct nand_device *nand = spinand_to_nand(spinand);
    493	unsigned int row = nanddev_pos_to_row(nand, pos);
    494	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
    495
    496	return spi_mem_exec_op(spinand->spimem, &op);
    497}
    498
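        /*
         * Wait for the in-flight operation to complete: poll the status register
         * until STATUS_BUSY clears or SPINAND_WAITRDY_TIMEOUT_MS expires, and
         * optionally return the last status value through @s.
         */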
    499static int spinand_wait(struct spinand_device *spinand,
    500			unsigned long initial_delay_us,
    501			unsigned long poll_delay_us,
    502			u8 *s)
    503{
    504	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
    505						      spinand->scratchbuf);
    506	u8 status;
    507	int ret;
    508
    509	ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
    510				  initial_delay_us,
    511				  poll_delay_us,
    512				  SPINAND_WAITRDY_TIMEOUT_MS);
    513	if (ret)
    514		return ret;
    515
    516	status = *spinand->scratchbuf;
    517	if (!(status & STATUS_BUSY))
    518		goto out;
    519
    520	/*
    521	 * Extra read, just in case the STATUS_READY bit has changed
    522	 * since our last check
    523	 */
    524	ret = spinand_read_status(spinand, &status);
    525	if (ret)
    526		return ret;
    527
    528out:
    529	if (s)
    530		*s = status;
    531
    532	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
    533}
    534
    535static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
    536			      u8 ndummy, u8 *buf)
    537{
    538	struct spi_mem_op op = SPINAND_READID_OP(
    539		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
    540	int ret;
    541
    542	ret = spi_mem_exec_op(spinand->spimem, &op);
    543	if (!ret)
    544		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
    545
    546	return ret;
    547}
    548
    549static int spinand_reset_op(struct spinand_device *spinand)
    550{
    551	struct spi_mem_op op = SPINAND_RESET_OP;
    552	int ret;
    553
    554	ret = spi_mem_exec_op(spinand->spimem, &op);
    555	if (ret)
    556		return ret;
    557
    558	return spinand_wait(spinand,
    559			    SPINAND_RESET_INITIAL_DELAY_US,
    560			    SPINAND_RESET_POLL_DELAY_US,
    561			    NULL);
    562}
    563
    564static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
    565{
    566	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
    567}
    568
    569static int spinand_read_page(struct spinand_device *spinand,
    570			     const struct nand_page_io_req *req)
    571{
    572	struct nand_device *nand = spinand_to_nand(spinand);
    573	u8 status;
    574	int ret;
    575
    576	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
    577	if (ret)
    578		return ret;
    579
    580	ret = spinand_load_page_op(spinand, req);
    581	if (ret)
    582		return ret;
    583
    584	ret = spinand_wait(spinand,
    585			   SPINAND_READ_INITIAL_DELAY_US,
    586			   SPINAND_READ_POLL_DELAY_US,
    587			   &status);
    588	if (ret < 0)
    589		return ret;
    590
    591	spinand_ondie_ecc_save_status(nand, status);
    592
    593	ret = spinand_read_from_cache_op(spinand, req);
    594	if (ret)
    595		return ret;
    596
    597	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
    598}
    599
    600static int spinand_write_page(struct spinand_device *spinand,
    601			      const struct nand_page_io_req *req)
    602{
    603	struct nand_device *nand = spinand_to_nand(spinand);
    604	u8 status;
    605	int ret;
    606
    607	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
    608	if (ret)
    609		return ret;
    610
    611	ret = spinand_write_enable_op(spinand);
    612	if (ret)
    613		return ret;
    614
    615	ret = spinand_write_to_cache_op(spinand, req);
    616	if (ret)
    617		return ret;
    618
    619	ret = spinand_program_op(spinand, req);
    620	if (ret)
    621		return ret;
    622
    623	ret = spinand_wait(spinand,
    624			   SPINAND_WRITE_INITIAL_DELAY_US,
    625			   SPINAND_WRITE_POLL_DELAY_US,
    626			   &status);
    627	if (!ret && (status & STATUS_PROG_FAILED))
    628		return -EIO;
    629
    630	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
    631}
    632
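        /*
         * mtd->_read_oob() implementation: read page by page under the device
         * lock, track the maximum number of corrected bitflips, and report
         * -EBADMSG only once the whole request has been processed.
         */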
    633static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
    634			    struct mtd_oob_ops *ops)
    635{
    636	struct spinand_device *spinand = mtd_to_spinand(mtd);
    637	struct nand_device *nand = mtd_to_nanddev(mtd);
    638	unsigned int max_bitflips = 0;
    639	struct nand_io_iter iter;
    640	bool disable_ecc = false;
    641	bool ecc_failed = false;
    642	int ret = 0;
    643
    644	if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
    645		disable_ecc = true;
    646
    647	mutex_lock(&spinand->lock);
    648
    649	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
    650		if (disable_ecc)
    651			iter.req.mode = MTD_OPS_RAW;
    652
    653		ret = spinand_select_target(spinand, iter.req.pos.target);
    654		if (ret)
    655			break;
    656
    657		ret = spinand_read_page(spinand, &iter.req);
    658		if (ret < 0 && ret != -EBADMSG)
    659			break;
    660
    661		if (ret == -EBADMSG)
    662			ecc_failed = true;
    663		else
    664			max_bitflips = max_t(unsigned int, max_bitflips, ret);
    665
    666		ret = 0;
    667		ops->retlen += iter.req.datalen;
    668		ops->oobretlen += iter.req.ooblen;
    669	}
    670
    671	mutex_unlock(&spinand->lock);
    672
    673	if (ecc_failed && !ret)
    674		ret = -EBADMSG;
    675
    676	return ret ? ret : max_bitflips;
    677}
    678
    679static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
    680			     struct mtd_oob_ops *ops)
    681{
    682	struct spinand_device *spinand = mtd_to_spinand(mtd);
    683	struct nand_device *nand = mtd_to_nanddev(mtd);
    684	struct nand_io_iter iter;
    685	bool disable_ecc = false;
    686	int ret = 0;
    687
    688	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
    689		disable_ecc = true;
    690
    691	mutex_lock(&spinand->lock);
    692
    693	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
    694		if (disable_ecc)
    695			iter.req.mode = MTD_OPS_RAW;
    696
    697		ret = spinand_select_target(spinand, iter.req.pos.target);
    698		if (ret)
    699			break;
    700
    701		ret = spinand_write_page(spinand, &iter.req);
    702		if (ret)
    703			break;
    704
    705		ops->retlen += iter.req.datalen;
    706		ops->oobretlen += iter.req.ooblen;
    707	}
    708
    709	mutex_unlock(&spinand->lock);
    710
    711	return ret;
    712}
    713
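        /*
         * A block is declared bad when the two bad-block marker bytes at the
         * start of the OOB area are not both 0xff; the page is read in raw
         * mode so the check is not disturbed by the on-die ECC engine.
         */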
    714static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
    715{
    716	struct spinand_device *spinand = nand_to_spinand(nand);
    717	u8 marker[2] = { };
    718	struct nand_page_io_req req = {
    719		.pos = *pos,
    720		.ooblen = sizeof(marker),
    721		.ooboffs = 0,
    722		.oobbuf.in = marker,
    723		.mode = MTD_OPS_RAW,
    724	};
    725
    726	spinand_select_target(spinand, pos->target);
    727	spinand_read_page(spinand, &req);
    728	if (marker[0] != 0xff || marker[1] != 0xff)
    729		return true;
    730
    731	return false;
    732}
    733
    734static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
    735{
    736	struct nand_device *nand = mtd_to_nanddev(mtd);
    737	struct spinand_device *spinand = nand_to_spinand(nand);
    738	struct nand_pos pos;
    739	int ret;
    740
    741	nanddev_offs_to_pos(nand, offs, &pos);
    742	mutex_lock(&spinand->lock);
    743	ret = nanddev_isbad(nand, &pos);
    744	mutex_unlock(&spinand->lock);
    745
    746	return ret;
    747}
    748
    749static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
    750{
    751	struct spinand_device *spinand = nand_to_spinand(nand);
    752	u8 marker[2] = { };
    753	struct nand_page_io_req req = {
    754		.pos = *pos,
    755		.ooboffs = 0,
    756		.ooblen = sizeof(marker),
    757		.oobbuf.out = marker,
    758		.mode = MTD_OPS_RAW,
    759	};
    760	int ret;
    761
    762	ret = spinand_select_target(spinand, pos->target);
    763	if (ret)
    764		return ret;
    765
    766	ret = spinand_write_enable_op(spinand);
    767	if (ret)
    768		return ret;
    769
    770	return spinand_write_page(spinand, &req);
    771}
    772
    773static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
    774{
    775	struct nand_device *nand = mtd_to_nanddev(mtd);
    776	struct spinand_device *spinand = nand_to_spinand(nand);
    777	struct nand_pos pos;
    778	int ret;
    779
    780	nanddev_offs_to_pos(nand, offs, &pos);
    781	mutex_lock(&spinand->lock);
    782	ret = nanddev_markbad(nand, &pos);
    783	mutex_unlock(&spinand->lock);
    784
    785	return ret;
    786}
    787
    788static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
    789{
    790	struct spinand_device *spinand = nand_to_spinand(nand);
    791	u8 status;
    792	int ret;
    793
    794	ret = spinand_select_target(spinand, pos->target);
    795	if (ret)
    796		return ret;
    797
    798	ret = spinand_write_enable_op(spinand);
    799	if (ret)
    800		return ret;
    801
    802	ret = spinand_erase_op(spinand, pos);
    803	if (ret)
    804		return ret;
    805
    806	ret = spinand_wait(spinand,
    807			   SPINAND_ERASE_INITIAL_DELAY_US,
    808			   SPINAND_ERASE_POLL_DELAY_US,
    809			   &status);
    810
    811	if (!ret && (status & STATUS_ERASE_FAILED))
    812		ret = -EIO;
    813
    814	return ret;
    815}
    816
    817static int spinand_mtd_erase(struct mtd_info *mtd,
    818			     struct erase_info *einfo)
    819{
    820	struct spinand_device *spinand = mtd_to_spinand(mtd);
    821	int ret;
    822
    823	mutex_lock(&spinand->lock);
    824	ret = nanddev_mtd_erase(mtd, einfo);
    825	mutex_unlock(&spinand->lock);
    826
    827	return ret;
    828}
    829
    830static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
    831{
    832	struct spinand_device *spinand = mtd_to_spinand(mtd);
    833	struct nand_device *nand = mtd_to_nanddev(mtd);
    834	struct nand_pos pos;
    835	int ret;
    836
    837	nanddev_offs_to_pos(nand, offs, &pos);
    838	mutex_lock(&spinand->lock);
    839	ret = nanddev_isreserved(nand, &pos);
    840	mutex_unlock(&spinand->lock);
    841
    842	return ret;
    843}
    844
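        /*
         * Create the direct-mapping descriptors covering one plane's page cache:
         * plain read/write descriptors plus ECC-enabled variants when a
         * pipelined ECC engine is used.
         */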
    845static int spinand_create_dirmap(struct spinand_device *spinand,
    846				 unsigned int plane)
    847{
    848	struct nand_device *nand = spinand_to_nand(spinand);
    849	struct spi_mem_dirmap_info info = {
    850		.length = nanddev_page_size(nand) +
    851			  nanddev_per_page_oobsize(nand),
    852	};
    853	struct spi_mem_dirmap_desc *desc;
    854
    855	/* The plane number is passed in MSB just above the column address */
    856	info.offset = plane << fls(nand->memorg.pagesize);
    857
    858	info.op_tmpl = *spinand->op_templates.update_cache;
    859	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
    860					  spinand->spimem, &info);
    861	if (IS_ERR(desc))
    862		return PTR_ERR(desc);
    863
    864	spinand->dirmaps[plane].wdesc = desc;
    865
    866	info.op_tmpl = *spinand->op_templates.read_cache;
    867	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
    868					  spinand->spimem, &info);
    869	if (IS_ERR(desc))
    870		return PTR_ERR(desc);
    871
    872	spinand->dirmaps[plane].rdesc = desc;
    873
    874	if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) {
    875		spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
    876		spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;
    877
    878		return 0;
    879	}
    880
    881	info.op_tmpl = *spinand->op_templates.update_cache;
    882	info.op_tmpl.data.ecc = true;
    883	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
    884					  spinand->spimem, &info);
    885	if (IS_ERR(desc))
    886		return PTR_ERR(desc);
    887
    888	spinand->dirmaps[plane].wdesc_ecc = desc;
    889
    890	info.op_tmpl = *spinand->op_templates.read_cache;
    891	info.op_tmpl.data.ecc = true;
    892	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
    893					  spinand->spimem, &info);
    894	if (IS_ERR(desc))
    895		return PTR_ERR(desc);
    896
    897	spinand->dirmaps[plane].rdesc_ecc = desc;
    898
    899	return 0;
    900}
    901
    902static int spinand_create_dirmaps(struct spinand_device *spinand)
    903{
    904	struct nand_device *nand = spinand_to_nand(spinand);
    905	int i, ret;
    906
    907	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
    908					sizeof(*spinand->dirmaps) *
    909					nand->memorg.planes_per_lun,
    910					GFP_KERNEL);
    911	if (!spinand->dirmaps)
    912		return -ENOMEM;
    913
    914	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
    915		ret = spinand_create_dirmap(spinand, i);
    916		if (ret)
    917			return ret;
    918	}
    919
    920	return 0;
    921}
    922
    923static const struct nand_ops spinand_ops = {
    924	.erase = spinand_erase,
    925	.markbad = spinand_markbad,
    926	.isbad = spinand_isbad,
    927};
    928
    929static const struct spinand_manufacturer *spinand_manufacturers[] = {
    930	&gigadevice_spinand_manufacturer,
    931	&macronix_spinand_manufacturer,
    932	&micron_spinand_manufacturer,
    933	&paragon_spinand_manufacturer,
    934	&toshiba_spinand_manufacturer,
    935	&winbond_spinand_manufacturer,
    936	&xtx_spinand_manufacturer,
    937};
    938
    939static int spinand_manufacturer_match(struct spinand_device *spinand,
    940				      enum spinand_readid_method rdid_method)
    941{
    942	u8 *id = spinand->id.data;
    943	unsigned int i;
    944	int ret;
    945
    946	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
    947		const struct spinand_manufacturer *manufacturer =
    948			spinand_manufacturers[i];
    949
    950		if (id[0] != manufacturer->id)
    951			continue;
    952
    953		ret = spinand_match_and_init(spinand,
    954					     manufacturer->chips,
    955					     manufacturer->nchips,
    956					     rdid_method);
    957		if (ret < 0)
    958			continue;
    959
    960		spinand->manufacturer = manufacturer;
    961		return 0;
    962	}
    963	return -ENOTSUPP;
    964}
    965
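        /*
         * Try the three READ_ID variants supported by the framework (opcode
         * only, opcode + address byte, opcode + dummy byte) and stop at the
         * first one that matches a manufacturer table entry.
         */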
    966static int spinand_id_detect(struct spinand_device *spinand)
    967{
    968	u8 *id = spinand->id.data;
    969	int ret;
    970
    971	ret = spinand_read_id_op(spinand, 0, 0, id);
    972	if (ret)
    973		return ret;
    974	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
    975	if (!ret)
    976		return 0;
    977
    978	ret = spinand_read_id_op(spinand, 1, 0, id);
    979	if (ret)
    980		return ret;
    981	ret = spinand_manufacturer_match(spinand,
    982					 SPINAND_READID_METHOD_OPCODE_ADDR);
    983	if (!ret)
    984		return 0;
    985
    986	ret = spinand_read_id_op(spinand, 0, 1, id);
    987	if (ret)
    988		return ret;
    989	ret = spinand_manufacturer_match(spinand,
    990					 SPINAND_READID_METHOD_OPCODE_DUMMY);
    991
    992	return ret;
    993}
    994
    995static int spinand_manufacturer_init(struct spinand_device *spinand)
    996{
    997	if (spinand->manufacturer->ops->init)
    998		return spinand->manufacturer->ops->init(spinand);
    999
   1000	return 0;
   1001}
   1002
   1003static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
   1004{
   1005	/* Release manufacturer private data */
   1006	if (spinand->manufacturer->ops->cleanup)
   1007		return spinand->manufacturer->ops->cleanup(spinand);
   1008}
   1009
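        /*
         * Pick the first op variant the controller can execute for a full
         * page + OOB transfer, possibly split into several chunks by
         * spi_mem_adjust_op_size().
         */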
   1010static const struct spi_mem_op *
   1011spinand_select_op_variant(struct spinand_device *spinand,
   1012			  const struct spinand_op_variants *variants)
   1013{
   1014	struct nand_device *nand = spinand_to_nand(spinand);
   1015	unsigned int i;
   1016
   1017	for (i = 0; i < variants->nops; i++) {
   1018		struct spi_mem_op op = variants->ops[i];
   1019		unsigned int nbytes;
   1020		int ret;
   1021
   1022		nbytes = nanddev_per_page_oobsize(nand) +
   1023			 nanddev_page_size(nand);
   1024
   1025		while (nbytes) {
   1026			op.data.nbytes = nbytes;
   1027			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
   1028			if (ret)
   1029				break;
   1030
   1031			if (!spi_mem_supports_op(spinand->spimem, &op))
   1032				break;
   1033
   1034			nbytes -= op.data.nbytes;
   1035		}
   1036
   1037		if (!nbytes)
   1038			return &variants->ops[i];
   1039	}
   1040
   1041	return NULL;
   1042}
   1043
   1044/**
   1045 * spinand_match_and_init() - Try to find a match between a device ID and an
   1046 *			      entry in a spinand_info table
   1047 * @spinand: SPI NAND object
   1048 * @table: SPI NAND device description table
   1049 * @table_size: size of the device description table
   1050 * @rdid_method: read id method to match
   1051 *
   1052 * Match between a device ID retrieved through the READ_ID command and an
   1053 * entry in the SPI NAND description table. If a match is found, the spinand
   1054 * object will be initialized with information provided by the matching
   1055 * spinand_info entry.
   1056 *
   1057 * Return: 0 on success, a negative error code otherwise.
   1058 */
   1059int spinand_match_and_init(struct spinand_device *spinand,
   1060			   const struct spinand_info *table,
   1061			   unsigned int table_size,
   1062			   enum spinand_readid_method rdid_method)
   1063{
   1064	u8 *id = spinand->id.data;
   1065	struct nand_device *nand = spinand_to_nand(spinand);
   1066	unsigned int i;
   1067
   1068	for (i = 0; i < table_size; i++) {
   1069		const struct spinand_info *info = &table[i];
   1070		const struct spi_mem_op *op;
   1071
   1072		if (rdid_method != info->devid.method)
   1073			continue;
   1074
   1075		if (memcmp(id + 1, info->devid.id, info->devid.len))
   1076			continue;
   1077
   1078		nand->memorg = table[i].memorg;
   1079		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
   1080		spinand->eccinfo = table[i].eccinfo;
   1081		spinand->flags = table[i].flags;
   1082		spinand->id.len = 1 + table[i].devid.len;
   1083		spinand->select_target = table[i].select_target;
   1084
   1085		op = spinand_select_op_variant(spinand,
   1086					       info->op_variants.read_cache);
   1087		if (!op)
   1088			return -ENOTSUPP;
   1089
   1090		spinand->op_templates.read_cache = op;
   1091
   1092		op = spinand_select_op_variant(spinand,
   1093					       info->op_variants.write_cache);
   1094		if (!op)
   1095			return -ENOTSUPP;
   1096
   1097		spinand->op_templates.write_cache = op;
   1098
   1099		op = spinand_select_op_variant(spinand,
   1100					       info->op_variants.update_cache);
   1101		spinand->op_templates.update_cache = op;
   1102
   1103		return 0;
   1104	}
   1105
   1106	return -ENOTSUPP;
   1107}
   1108
   1109static int spinand_detect(struct spinand_device *spinand)
   1110{
   1111	struct device *dev = &spinand->spimem->spi->dev;
   1112	struct nand_device *nand = spinand_to_nand(spinand);
   1113	int ret;
   1114
   1115	ret = spinand_reset_op(spinand);
   1116	if (ret)
   1117		return ret;
   1118
   1119	ret = spinand_id_detect(spinand);
   1120	if (ret) {
   1121		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
   1122			spinand->id.data);
   1123		return ret;
   1124	}
   1125
   1126	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
   1127		dev_err(dev,
   1128			"SPI NANDs with more than one die must implement ->select_target()\n");
   1129		return -EINVAL;
   1130	}
   1131
   1132	dev_info(&spinand->spimem->spi->dev,
   1133		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
   1134	dev_info(&spinand->spimem->spi->dev,
   1135		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
   1136		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
   1137		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
   1138
   1139	return 0;
   1140}
   1141
   1142static int spinand_init_flash(struct spinand_device *spinand)
   1143{
   1144	struct device *dev = &spinand->spimem->spi->dev;
   1145	struct nand_device *nand = spinand_to_nand(spinand);
   1146	int ret, i;
   1147
   1148	ret = spinand_read_cfg(spinand);
   1149	if (ret)
   1150		return ret;
   1151
   1152	ret = spinand_init_quad_enable(spinand);
   1153	if (ret)
   1154		return ret;
   1155
   1156	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
   1157	if (ret)
   1158		return ret;
   1159
   1160	ret = spinand_manufacturer_init(spinand);
   1161	if (ret) {
   1162		dev_err(dev,
   1163		"Failed to initialize the SPI NAND chip (err = %d)\n",
   1164		ret);
   1165		return ret;
   1166	}
   1167
   1168	/* After power up, all blocks are locked, so unlock them here. */
   1169	for (i = 0; i < nand->memorg.ntargets; i++) {
   1170		ret = spinand_select_target(spinand, i);
   1171		if (ret)
   1172			break;
   1173
   1174		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
   1175		if (ret)
   1176			break;
   1177	}
   1178
   1179	if (ret)
   1180		spinand_manufacturer_cleanup(spinand);
   1181
   1182	return ret;
   1183}
   1184
   1185static void spinand_mtd_resume(struct mtd_info *mtd)
   1186{
   1187	struct spinand_device *spinand = mtd_to_spinand(mtd);
   1188	int ret;
   1189
   1190	ret = spinand_reset_op(spinand);
   1191	if (ret)
   1192		return;
   1193
   1194	ret = spinand_init_flash(spinand);
   1195	if (ret)
   1196		return;
   1197
   1198	spinand_ecc_enable(spinand, false);
   1199}
   1200
   1201static int spinand_init(struct spinand_device *spinand)
   1202{
   1203	struct device *dev = &spinand->spimem->spi->dev;
   1204	struct mtd_info *mtd = spinand_to_mtd(spinand);
   1205	struct nand_device *nand = mtd_to_nanddev(mtd);
   1206	int ret;
   1207
   1208	/*
   1209	 * We need a scratch buffer because the spi_mem interface requires that
   1210	 * buf passed in spi_mem_op->data.buf be DMA-able.
   1211	 */
   1212	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
   1213	if (!spinand->scratchbuf)
   1214		return -ENOMEM;
   1215
   1216	ret = spinand_detect(spinand);
   1217	if (ret)
   1218		goto err_free_bufs;
   1219
   1220	/*
   1221	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
   1222	 * may use this buffer for DMA access.
   1223	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
   1224	 */
   1225	spinand->databuf = kzalloc(nanddev_page_size(nand) +
   1226			       nanddev_per_page_oobsize(nand),
   1227			       GFP_KERNEL);
   1228	if (!spinand->databuf) {
   1229		ret = -ENOMEM;
   1230		goto err_free_bufs;
   1231	}
   1232
   1233	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
   1234
   1235	ret = spinand_init_cfg_cache(spinand);
   1236	if (ret)
   1237		goto err_free_bufs;
   1238
   1239	ret = spinand_init_flash(spinand);
   1240	if (ret)
   1241		goto err_free_bufs;
   1242
   1243	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
   1244	if (ret)
   1245		goto err_manuf_cleanup;
   1246
   1247	/* SPI-NAND default ECC engine is on-die */
   1248	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
   1249	nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;
   1250
   1251	spinand_ecc_enable(spinand, false);
   1252	ret = nanddev_ecc_engine_init(nand);
   1253	if (ret)
   1254		goto err_cleanup_nanddev;
   1255
   1256	mtd->_read_oob = spinand_mtd_read;
   1257	mtd->_write_oob = spinand_mtd_write;
   1258	mtd->_block_isbad = spinand_mtd_block_isbad;
   1259	mtd->_block_markbad = spinand_mtd_block_markbad;
   1260	mtd->_block_isreserved = spinand_mtd_block_isreserved;
   1261	mtd->_erase = spinand_mtd_erase;
   1262	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
   1263	mtd->_resume = spinand_mtd_resume;
   1264
   1265	if (nand->ecc.engine) {
   1266		ret = mtd_ooblayout_count_freebytes(mtd);
   1267		if (ret < 0)
   1268			goto err_cleanup_ecc_engine;
   1269	}
   1270
   1271	mtd->oobavail = ret;
   1272
   1273	/* Propagate ECC information to mtd_info */
   1274	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
   1275	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
   1276
   1277	ret = spinand_create_dirmaps(spinand);
   1278	if (ret) {
   1279		dev_err(dev,
   1280			"Failed to create direct mappings for read/write operations (err = %d)\n",
   1281			ret);
   1282		goto err_cleanup_ecc_engine;
   1283	}
   1284
   1285	return 0;
   1286
   1287err_cleanup_ecc_engine:
   1288	nanddev_ecc_engine_cleanup(nand);
   1289
   1290err_cleanup_nanddev:
   1291	nanddev_cleanup(nand);
   1292
   1293err_manuf_cleanup:
   1294	spinand_manufacturer_cleanup(spinand);
   1295
   1296err_free_bufs:
   1297	kfree(spinand->databuf);
   1298	kfree(spinand->scratchbuf);
   1299	return ret;
   1300}
   1301
   1302static void spinand_cleanup(struct spinand_device *spinand)
   1303{
   1304	struct nand_device *nand = spinand_to_nand(spinand);
   1305
   1306	nanddev_cleanup(nand);
   1307	spinand_manufacturer_cleanup(spinand);
   1308	kfree(spinand->databuf);
   1309	kfree(spinand->scratchbuf);
   1310}
   1311
   1312static int spinand_probe(struct spi_mem *mem)
   1313{
   1314	struct spinand_device *spinand;
   1315	struct mtd_info *mtd;
   1316	int ret;
   1317
   1318	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
   1319			       GFP_KERNEL);
   1320	if (!spinand)
   1321		return -ENOMEM;
   1322
   1323	spinand->spimem = mem;
   1324	spi_mem_set_drvdata(mem, spinand);
   1325	spinand_set_of_node(spinand, mem->spi->dev.of_node);
   1326	mutex_init(&spinand->lock);
   1327	mtd = spinand_to_mtd(spinand);
   1328	mtd->dev.parent = &mem->spi->dev;
   1329
   1330	ret = spinand_init(spinand);
   1331	if (ret)
   1332		return ret;
   1333
   1334	ret = mtd_device_register(mtd, NULL, 0);
   1335	if (ret)
   1336		goto err_spinand_cleanup;
   1337
   1338	return 0;
   1339
   1340err_spinand_cleanup:
   1341	spinand_cleanup(spinand);
   1342
   1343	return ret;
   1344}
   1345
   1346static int spinand_remove(struct spi_mem *mem)
   1347{
   1348	struct spinand_device *spinand;
   1349	struct mtd_info *mtd;
   1350	int ret;
   1351
   1352	spinand = spi_mem_get_drvdata(mem);
   1353	mtd = spinand_to_mtd(spinand);
   1354
   1355	ret = mtd_device_unregister(mtd);
   1356	if (ret)
   1357		return ret;
   1358
   1359	spinand_cleanup(spinand);
   1360
   1361	return 0;
   1362}
   1363
   1364static const struct spi_device_id spinand_ids[] = {
   1365	{ .name = "spi-nand" },
   1366	{ /* sentinel */ },
   1367};
   1368MODULE_DEVICE_TABLE(spi, spinand_ids);
   1369
   1370#ifdef CONFIG_OF
   1371static const struct of_device_id spinand_of_ids[] = {
   1372	{ .compatible = "spi-nand" },
   1373	{ /* sentinel */ },
   1374};
   1375MODULE_DEVICE_TABLE(of, spinand_of_ids);
   1376#endif
   1377
   1378static struct spi_mem_driver spinand_drv = {
   1379	.spidrv = {
   1380		.id_table = spinand_ids,
   1381		.driver = {
   1382			.name = "spi-nand",
   1383			.of_match_table = of_match_ptr(spinand_of_ids),
   1384		},
   1385	},
   1386	.probe = spinand_probe,
   1387	.remove = spinand_remove,
   1388};
   1389module_spi_mem_driver(spinand_drv);
   1390
   1391MODULE_DESCRIPTION("SPI NAND framework");
   1392MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
   1393MODULE_LICENSE("GPL v2");