cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

nandsim.c (68080B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * NAND flash simulator.
      4 *
      5 * Author: Artem B. Bityuckiy <dedekind@oktetlabs.ru>, <dedekind@infradead.org>
      6 *
      7 * Copyright (C) 2004 Nokia Corporation
      8 *
      9 * Note: NS means "NAND Simulator".
     10 * Note: Input means input TO flash chip, output means output FROM chip.
     11 */
     12
     13#define pr_fmt(fmt)  "[nandsim]" fmt
     14
     15#include <linux/init.h>
     16#include <linux/types.h>
     17#include <linux/module.h>
     18#include <linux/moduleparam.h>
     19#include <linux/vmalloc.h>
     20#include <linux/math64.h>
     21#include <linux/slab.h>
     22#include <linux/errno.h>
     23#include <linux/string.h>
     24#include <linux/mtd/mtd.h>
     25#include <linux/mtd/rawnand.h>
     26#include <linux/mtd/partitions.h>
     27#include <linux/delay.h>
     28#include <linux/list.h>
     29#include <linux/random.h>
     30#include <linux/sched.h>
     31#include <linux/sched/mm.h>
     32#include <linux/fs.h>
     33#include <linux/pagemap.h>
     34#include <linux/seq_file.h>
     35#include <linux/debugfs.h>
     36
     37/* Default simulator parameters values */
     38#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE)  || \
     39    !defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
     40    !defined(CONFIG_NANDSIM_THIRD_ID_BYTE)  || \
     41    !defined(CONFIG_NANDSIM_FOURTH_ID_BYTE)
     42#define CONFIG_NANDSIM_FIRST_ID_BYTE  0x98
     43#define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39
     44#define CONFIG_NANDSIM_THIRD_ID_BYTE  0xFF /* No byte */
     45#define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF /* No byte */
     46#endif
     47
     48#ifndef CONFIG_NANDSIM_ACCESS_DELAY
     49#define CONFIG_NANDSIM_ACCESS_DELAY 25
     50#endif
     51#ifndef CONFIG_NANDSIM_PROGRAMM_DELAY
     52#define CONFIG_NANDSIM_PROGRAMM_DELAY 200
     53#endif
     54#ifndef CONFIG_NANDSIM_ERASE_DELAY
     55#define CONFIG_NANDSIM_ERASE_DELAY 2
     56#endif
     57#ifndef CONFIG_NANDSIM_OUTPUT_CYCLE
     58#define CONFIG_NANDSIM_OUTPUT_CYCLE 40
     59#endif
     60#ifndef CONFIG_NANDSIM_INPUT_CYCLE
     61#define CONFIG_NANDSIM_INPUT_CYCLE  50
     62#endif
     63#ifndef CONFIG_NANDSIM_BUS_WIDTH
     64#define CONFIG_NANDSIM_BUS_WIDTH  8
     65#endif
     66#ifndef CONFIG_NANDSIM_DO_DELAYS
     67#define CONFIG_NANDSIM_DO_DELAYS  0
     68#endif
     69#ifndef CONFIG_NANDSIM_LOG
     70#define CONFIG_NANDSIM_LOG        0
     71#endif
     72#ifndef CONFIG_NANDSIM_DBG
     73#define CONFIG_NANDSIM_DBG        0
     74#endif
     75#ifndef CONFIG_NANDSIM_MAX_PARTS
     76#define CONFIG_NANDSIM_MAX_PARTS  32
     77#endif
     78
     79static uint access_delay   = CONFIG_NANDSIM_ACCESS_DELAY;
     80static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
     81static uint erase_delay    = CONFIG_NANDSIM_ERASE_DELAY;
     82static uint output_cycle   = CONFIG_NANDSIM_OUTPUT_CYCLE;
     83static uint input_cycle    = CONFIG_NANDSIM_INPUT_CYCLE;
     84static uint bus_width      = CONFIG_NANDSIM_BUS_WIDTH;
     85static uint do_delays      = CONFIG_NANDSIM_DO_DELAYS;
     86static uint log            = CONFIG_NANDSIM_LOG;
     87static uint dbg            = CONFIG_NANDSIM_DBG;
     88static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
     89static unsigned int parts_num;
     90static char *badblocks = NULL;
     91static char *weakblocks = NULL;
     92static char *weakpages = NULL;
     93static unsigned int bitflips = 0;
     94static char *gravepages = NULL;
     95static unsigned int overridesize = 0;
     96static char *cache_file = NULL;
     97static unsigned int bbt;
     98static unsigned int bch;
     99static u_char id_bytes[8] = {
    100	[0] = CONFIG_NANDSIM_FIRST_ID_BYTE,
    101	[1] = CONFIG_NANDSIM_SECOND_ID_BYTE,
    102	[2] = CONFIG_NANDSIM_THIRD_ID_BYTE,
    103	[3] = CONFIG_NANDSIM_FOURTH_ID_BYTE,
    104	[4 ... 7] = 0xFF,
    105};
    106
    107module_param_array(id_bytes, byte, NULL, 0400);
    108module_param_named(first_id_byte, id_bytes[0], byte, 0400);
    109module_param_named(second_id_byte, id_bytes[1], byte, 0400);
    110module_param_named(third_id_byte, id_bytes[2], byte, 0400);
    111module_param_named(fourth_id_byte, id_bytes[3], byte, 0400);
    112module_param(access_delay,   uint, 0400);
    113module_param(programm_delay, uint, 0400);
    114module_param(erase_delay,    uint, 0400);
    115module_param(output_cycle,   uint, 0400);
    116module_param(input_cycle,    uint, 0400);
    117module_param(bus_width,      uint, 0400);
    118module_param(do_delays,      uint, 0400);
    119module_param(log,            uint, 0400);
    120module_param(dbg,            uint, 0400);
    121module_param_array(parts, ulong, &parts_num, 0400);
    122module_param(badblocks,      charp, 0400);
    123module_param(weakblocks,     charp, 0400);
    124module_param(weakpages,      charp, 0400);
    125module_param(bitflips,       uint, 0400);
    126module_param(gravepages,     charp, 0400);
    127module_param(overridesize,   uint, 0400);
    128module_param(cache_file,     charp, 0400);
    129module_param(bbt,	     uint, 0400);
    130module_param(bch,	     uint, 0400);
    131
    132MODULE_PARM_DESC(id_bytes,       "The ID bytes returned by NAND Flash 'read ID' command");
    133MODULE_PARM_DESC(first_id_byte,  "The first byte returned by NAND Flash 'read ID' command (manufacturer ID) (obsolete)");
    134MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID) (obsolete)");
    135MODULE_PARM_DESC(third_id_byte,  "The third byte returned by NAND Flash 'read ID' command (obsolete)");
    136MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command (obsolete)");
    137MODULE_PARM_DESC(access_delay,   "Initial page access delay (microseconds)");
     138MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
    139MODULE_PARM_DESC(erase_delay,    "Sector erase delay (milliseconds)");
    140MODULE_PARM_DESC(output_cycle,   "Word output (from flash) time (nanoseconds)");
    141MODULE_PARM_DESC(input_cycle,    "Word input (to flash) time (nanoseconds)");
    142MODULE_PARM_DESC(bus_width,      "Chip's bus width (8- or 16-bit)");
    143MODULE_PARM_DESC(do_delays,      "Simulate NAND delays using busy-waits if not zero");
    144MODULE_PARM_DESC(log,            "Perform logging if not zero");
    145MODULE_PARM_DESC(dbg,            "Output debug information if not zero");
    146MODULE_PARM_DESC(parts,          "Partition sizes (in erase blocks) separated by commas");
    147/* Page and erase block positions for the following parameters are independent of any partitions */
    148MODULE_PARM_DESC(badblocks,      "Erase blocks that are initially marked bad, separated by commas");
    149MODULE_PARM_DESC(weakblocks,     "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
    150				 " separated by commas e.g. 113:2 means eb 113"
    151				 " can be erased only twice before failing");
    152MODULE_PARM_DESC(weakpages,      "Weak pages [: maximum writes (defaults to 3)]"
    153				 " separated by commas e.g. 1401:2 means page 1401"
    154				 " can be written only twice before failing");
    155MODULE_PARM_DESC(bitflips,       "Maximum number of random bit flips per page (zero by default)");
    156MODULE_PARM_DESC(gravepages,     "Pages that lose data [: maximum reads (defaults to 3)]"
    157				 " separated by commas e.g. 1401:2 means page 1401"
    158				 " can be read only twice before failing");
    159MODULE_PARM_DESC(overridesize,   "Specifies the NAND Flash size overriding the ID bytes. "
    160				 "The size is specified in erase blocks and as the exponent of a power of two"
    161				 " e.g. 5 means a size of 32 erase blocks");
    162MODULE_PARM_DESC(cache_file,     "File to use to cache nand pages instead of memory");
    163MODULE_PARM_DESC(bbt,		 "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area");
    164MODULE_PARM_DESC(bch,		 "Enable BCH ecc and set how many bits should "
    165				 "be correctable in 512-byte blocks");
    166
    167/* The largest possible page size */
    168#define NS_LARGEST_PAGE_SIZE	4096
    169
    170/* Simulator's output macros (logging, debugging, warning, error) */
    171#define NS_LOG(args...) \
    172	do { if (log) pr_debug(" log: " args); } while(0)
    173#define NS_DBG(args...) \
    174	do { if (dbg) pr_debug(" debug: " args); } while(0)
    175#define NS_WARN(args...) \
    176	do { pr_warn(" warning: " args); } while(0)
    177#define NS_ERR(args...) \
    178	do { pr_err(" error: " args); } while(0)
    179#define NS_INFO(args...) \
    180	do { pr_info(" " args); } while(0)
    181
    182/* Busy-wait delay macros (microseconds, milliseconds) */
    183#define NS_UDELAY(us) \
    184        do { if (do_delays) udelay(us); } while(0)
    185#define NS_MDELAY(us) \
    186        do { if (do_delays) mdelay(us); } while(0)
    187
    188/* Is the nandsim structure initialized ? */
    189#define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)
    190
    191/* Good operation completion status */
    192#define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))
    193
    194/* Operation failed completion status */
    195#define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))
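/*
 * In the NAND status byte, NAND_STATUS_WP set means the chip is *not*
 * write-protected, hence the (lines.wp == 0) factor above; a failed operation
 * additionally reports NAND_STATUS_FAIL on top of the "OK" bits.
 */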
    196
    197/* Calculate the page offset in flash RAM image by (row, column) address */
    198#define NS_RAW_OFFSET(ns) \
    199	(((ns)->regs.row * (ns)->geom.pgszoob) + (ns)->regs.column)
    200
    201/* Calculate the OOB offset in flash RAM image by (row, column) address */
    202#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
    203
    204/* Calculate the byte shift in the next page to access */
    205#define NS_PAGE_BYTE_SHIFT(ns) ((ns)->regs.column + (ns)->regs.off)
    206
    207/* After a command is input, the simulator goes to one of the following states */
    208#define STATE_CMD_READ0        0x00000001 /* read data from the beginning of page */
    209#define STATE_CMD_READ1        0x00000002 /* read data from the second half of page */
    210#define STATE_CMD_READSTART    0x00000003 /* read data second command (large page devices) */
    211#define STATE_CMD_PAGEPROG     0x00000004 /* start page program */
    212#define STATE_CMD_READOOB      0x00000005 /* read OOB area */
    213#define STATE_CMD_ERASE1       0x00000006 /* sector erase first command */
    214#define STATE_CMD_STATUS       0x00000007 /* read status */
    215#define STATE_CMD_SEQIN        0x00000009 /* sequential data input */
    216#define STATE_CMD_READID       0x0000000A /* read ID */
    217#define STATE_CMD_ERASE2       0x0000000B /* sector erase second command */
    218#define STATE_CMD_RESET        0x0000000C /* reset */
    219#define STATE_CMD_RNDOUT       0x0000000D /* random output command */
    220#define STATE_CMD_RNDOUTSTART  0x0000000E /* random output start command */
    221#define STATE_CMD_MASK         0x0000000F /* command states mask */
    222
    223/* After an address is input, the simulator goes to one of these states */
    224#define STATE_ADDR_PAGE        0x00000010 /* full (row, column) address is accepted */
    225#define STATE_ADDR_SEC         0x00000020 /* sector address was accepted */
    226#define STATE_ADDR_COLUMN      0x00000030 /* column address was accepted */
    227#define STATE_ADDR_ZERO        0x00000040 /* one byte zero address was accepted */
    228#define STATE_ADDR_MASK        0x00000070 /* address states mask */
    229
    230/* During data input/output the simulator is in these states */
    231#define STATE_DATAIN           0x00000100 /* waiting for data input */
    232#define STATE_DATAIN_MASK      0x00000100 /* data input states mask */
    233
    234#define STATE_DATAOUT          0x00001000 /* waiting for page data output */
    235#define STATE_DATAOUT_ID       0x00002000 /* waiting for ID bytes output */
    236#define STATE_DATAOUT_STATUS   0x00003000 /* waiting for status output */
    237#define STATE_DATAOUT_MASK     0x00007000 /* data output states mask */
    238
    239/* Previous operation is done, ready to accept new requests */
    240#define STATE_READY            0x00000000
    241
    242/* This state is used to mark that the next state isn't known yet */
    243#define STATE_UNKNOWN          0x10000000
    244
    245/* Simulator's actions bit masks */
    246#define ACTION_CPY       0x00100000 /* copy page/OOB to the internal buffer */
    247#define ACTION_PRGPAGE   0x00200000 /* program the internal buffer to flash */
    248#define ACTION_SECERASE  0x00300000 /* erase sector */
    249#define ACTION_ZEROOFF   0x00400000 /* don't add any offset to address */
    250#define ACTION_HALFOFF   0x00500000 /* add to address half of page */
    251#define ACTION_OOBOFF    0x00600000 /* add to address OOB offset */
    252#define ACTION_MASK      0x00700000 /* action mask */
    253
    254#define NS_OPER_NUM      13 /* Number of operations supported by the simulator */
    255#define NS_OPER_STATES   6  /* Maximum number of states in operation */
    256
    257#define OPT_ANY          0xFFFFFFFF /* any chip supports this operation */
    258#define OPT_PAGE512      0x00000002 /* 512-byte  page chips */
    259#define OPT_PAGE2048     0x00000008 /* 2048-byte page chips */
    260#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
    261#define OPT_PAGE4096     0x00000080 /* 4096-byte page chips */
    262#define OPT_LARGEPAGE    (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
    263#define OPT_SMALLPAGE    (OPT_PAGE512) /* 512-byte page chips */
    264
    265/* Remove action bits from state */
    266#define NS_STATE(x) ((x) & ~ACTION_MASK)
    267
    268/*
    269 * Maximum previous states which need to be saved. Currently saving is
    270 * only needed for page program operation with preceded read command
    271 * (which is only valid for 512-byte pages).
    272 */
    273#define NS_MAX_PREVSTATES 1
    274
    275/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
    276#define NS_MAX_HELD_PAGES 16
    277
    278/*
    279 * A union to represent flash memory contents and flash buffer.
    280 */
    281union ns_mem {
    282	u_char *byte;    /* for byte access */
    283	uint16_t *word;  /* for 16-bit word access */
    284};
    285
    286/*
    287 * The structure which describes all the internal simulator data.
    288 */
    289struct nandsim {
    290	struct nand_chip chip;
    291	struct nand_controller base;
    292	struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
    293	unsigned int nbparts;
    294
    295	uint busw;              /* flash chip bus width (8 or 16) */
    296	u_char ids[8];          /* chip's ID bytes */
    297	uint32_t options;       /* chip's characteristic bits */
    298	uint32_t state;         /* current chip state */
    299	uint32_t nxstate;       /* next expected state */
    300
     301	uint32_t *op;           /* current operation; NULL if the operation isn't known yet */
    302	uint32_t pstates[NS_MAX_PREVSTATES]; /* previous states */
    303	uint16_t npstates;      /* number of previous states saved */
    304	uint16_t stateidx;      /* current state index */
    305
    306	/* The simulated NAND flash pages array */
    307	union ns_mem *pages;
    308
    309	/* Slab allocator for nand pages */
    310	struct kmem_cache *nand_pages_slab;
    311
    312	/* Internal buffer of page + OOB size bytes */
    313	union ns_mem buf;
    314
    315	/* NAND flash "geometry" */
    316	struct {
    317		uint64_t totsz;     /* total flash size, bytes */
    318		uint32_t secsz;     /* flash sector (erase block) size, bytes */
    319		uint pgsz;          /* NAND flash page size, bytes */
    320		uint oobsz;         /* page OOB area size, bytes */
    321		uint64_t totszoob;  /* total flash size including OOB, bytes */
     322		uint pgszoob;       /* page size including OOB, bytes */
    323		uint secszoob;      /* sector size including OOB, bytes */
    324		uint pgnum;         /* total number of pages */
    325		uint pgsec;         /* number of pages per sector */
     326		uint secshift;      /* log2 of the sector size */
     327		uint pgshift;       /* log2 of the page size */
    328		uint pgaddrbytes;   /* bytes per page address */
    329		uint secaddrbytes;  /* bytes per sector address */
     330		uint idbytes;       /* the number of ID bytes that this chip outputs */
    331	} geom;
    332
    333	/* NAND flash internal registers */
    334	struct {
    335		unsigned command; /* the command register */
    336		u_char   status;  /* the status register */
    337		uint     row;     /* the page number */
    338		uint     column;  /* the offset within page */
    339		uint     count;   /* internal counter */
    340		uint     num;     /* number of bytes which must be processed */
    341		uint     off;     /* fixed page offset */
    342	} regs;
    343
    344	/* NAND flash lines state */
    345        struct {
    346                int ce;  /* chip Enable */
    347                int cle; /* command Latch Enable */
    348                int ale; /* address Latch Enable */
    349                int wp;  /* write Protect */
    350        } lines;
    351
    352	/* Fields needed when using a cache file */
    353	struct file *cfile; /* Open file */
    354	unsigned long *pages_written; /* Which pages have been written */
    355	void *file_buf;
    356	struct page *held_pages[NS_MAX_HELD_PAGES];
    357	int held_cnt;
    358
    359	/* debugfs entry */
    360	struct dentry *dent;
    361};
    362
    363/*
    364 * Operations array. To perform any operation the simulator must pass
     365 * through the corresponding chain of states.
    366 */
    367static struct nandsim_operations {
    368	uint32_t reqopts;  /* options which are required to perform the operation */
    369	uint32_t states[NS_OPER_STATES]; /* operation's states */
    370} ops[NS_OPER_NUM] = {
    371	/* Read page + OOB from the beginning */
    372	{OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
    373			STATE_DATAOUT, STATE_READY}},
    374	/* Read page + OOB from the second half */
    375	{OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
    376			STATE_DATAOUT, STATE_READY}},
    377	/* Read OOB */
    378	{OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
    379			STATE_DATAOUT, STATE_READY}},
    380	/* Program page starting from the beginning */
    381	{OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
    382			STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
     383	/* Program page starting from the beginning (preceded by the READ0 command) */
    384	{OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
    385			      STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
    386	/* Program page starting from the second half */
    387	{OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
    388			      STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
    389	/* Program OOB */
    390	{OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
    391			      STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
    392	/* Erase sector */
    393	{OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
    394	/* Read status */
    395	{OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
    396	/* Read ID */
    397	{OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
    398	/* Large page devices read page */
    399	{OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
    400			       STATE_DATAOUT, STATE_READY}},
    401	/* Large page devices random page read */
    402	{OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY,
    403			       STATE_DATAOUT, STATE_READY}},
    404};
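/*
 * Reading the table: for example, the large page read entry above is the
 * chain STATE_CMD_READ0 -> STATE_ADDR_PAGE -> STATE_CMD_READSTART (the page
 * is copied into the internal buffer at this step) -> STATE_DATAOUT ->
 * STATE_READY, mirroring the 0x00 / full address / 0x30 / data-out command
 * sequence of large page devices.
 */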
    405
    406struct weak_block {
    407	struct list_head list;
    408	unsigned int erase_block_no;
    409	unsigned int max_erases;
    410	unsigned int erases_done;
    411};
    412
    413static LIST_HEAD(weak_blocks);
    414
    415struct weak_page {
    416	struct list_head list;
    417	unsigned int page_no;
    418	unsigned int max_writes;
    419	unsigned int writes_done;
    420};
    421
    422static LIST_HEAD(weak_pages);
    423
    424struct grave_page {
    425	struct list_head list;
    426	unsigned int page_no;
    427	unsigned int max_reads;
    428	unsigned int reads_done;
    429};
    430
    431static LIST_HEAD(grave_pages);
    432
    433static unsigned long *erase_block_wear = NULL;
    434static unsigned int wear_eb_count = 0;
    435static unsigned long total_wear = 0;
    436
    437/* MTD structure for NAND controller */
    438static struct mtd_info *nsmtd;
    439
    440static int ns_show(struct seq_file *m, void *private)
    441{
    442	unsigned long wmin = -1, wmax = 0, avg;
    443	unsigned long deciles[10], decile_max[10], tot = 0;
    444	unsigned int i;
    445
    446	/* Calc wear stats */
    447	for (i = 0; i < wear_eb_count; ++i) {
    448		unsigned long wear = erase_block_wear[i];
    449		if (wear < wmin)
    450			wmin = wear;
    451		if (wear > wmax)
    452			wmax = wear;
    453		tot += wear;
    454	}
    455
    456	for (i = 0; i < 9; ++i) {
    457		deciles[i] = 0;
    458		decile_max[i] = (wmax * (i + 1) + 5) / 10;
    459	}
    460	deciles[9] = 0;
    461	decile_max[9] = wmax;
    462	for (i = 0; i < wear_eb_count; ++i) {
    463		int d;
    464		unsigned long wear = erase_block_wear[i];
    465		for (d = 0; d < 10; ++d)
    466			if (wear <= decile_max[d]) {
    467				deciles[d] += 1;
    468				break;
    469			}
    470	}
    471	avg = tot / wear_eb_count;
    472
    473	/* Output wear report */
    474	seq_printf(m, "Total numbers of erases:  %lu\n", tot);
    475	seq_printf(m, "Number of erase blocks:   %u\n", wear_eb_count);
    476	seq_printf(m, "Average number of erases: %lu\n", avg);
    477	seq_printf(m, "Maximum number of erases: %lu\n", wmax);
    478	seq_printf(m, "Minimum number of erases: %lu\n", wmin);
    479	for (i = 0; i < 10; ++i) {
    480		unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
    481		if (from > decile_max[i])
    482			continue;
    483		seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n",
    484			from,
    485			decile_max[i],
    486			deciles[i]);
    487	}
    488
    489	return 0;
    490}
    491DEFINE_SHOW_ATTRIBUTE(ns);
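/*
 * DEFINE_SHOW_ATTRIBUTE(ns) generates the ns_fops file operations (wrapping
 * ns_show() via single_open()) that are handed to debugfs below.
 */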
    492
    493/**
    494 * ns_debugfs_create - initialize debugfs
    495 * @ns: nandsim device description object
    496 *
     497 * This function creates all debugfs files for nandsim device @ns. Returns zero in
    498 * case of success and a negative error code in case of failure.
    499 */
    500static int ns_debugfs_create(struct nandsim *ns)
    501{
    502	struct dentry *root = nsmtd->dbg.dfs_dir;
    503
    504	/*
    505	 * Just skip debugfs initialization when the debugfs directory is
    506	 * missing.
    507	 */
    508	if (IS_ERR_OR_NULL(root)) {
    509		if (IS_ENABLED(CONFIG_DEBUG_FS) &&
    510		    !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
    511			NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
    512		return 0;
    513	}
    514
    515	ns->dent = debugfs_create_file("nandsim_wear_report", 0400, root, ns,
    516				       &ns_fops);
    517	if (IS_ERR_OR_NULL(ns->dent)) {
    518		NS_ERR("cannot create \"nandsim_wear_report\" debugfs entry\n");
    519		return -1;
    520	}
    521
    522	return 0;
    523}
    524
    525static void ns_debugfs_remove(struct nandsim *ns)
    526{
    527	debugfs_remove_recursive(ns->dent);
    528}
    529
    530/*
     531 * Allocate the array of page pointers, create a slab cache for page
     532 * allocations and initialize the array with NULL pointers.
    533 *
    534 * RETURNS: 0 if success, -ENOMEM if memory alloc fails.
    535 */
    536static int __init ns_alloc_device(struct nandsim *ns)
    537{
    538	struct file *cfile;
    539	int i, err;
    540
    541	if (cache_file) {
    542		cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
    543		if (IS_ERR(cfile))
    544			return PTR_ERR(cfile);
    545		if (!(cfile->f_mode & FMODE_CAN_READ)) {
    546			NS_ERR("alloc_device: cache file not readable\n");
    547			err = -EINVAL;
    548			goto err_close_filp;
    549		}
    550		if (!(cfile->f_mode & FMODE_CAN_WRITE)) {
    551			NS_ERR("alloc_device: cache file not writeable\n");
    552			err = -EINVAL;
    553			goto err_close_filp;
    554		}
    555		ns->pages_written =
    556			vzalloc(array_size(sizeof(unsigned long),
    557					   BITS_TO_LONGS(ns->geom.pgnum)));
    558		if (!ns->pages_written) {
    559			NS_ERR("alloc_device: unable to allocate pages written array\n");
    560			err = -ENOMEM;
    561			goto err_close_filp;
    562		}
    563		ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
    564		if (!ns->file_buf) {
    565			NS_ERR("alloc_device: unable to allocate file buf\n");
    566			err = -ENOMEM;
    567			goto err_free_pw;
    568		}
    569		ns->cfile = cfile;
    570
    571		return 0;
    572
    573err_free_pw:
    574		vfree(ns->pages_written);
    575err_close_filp:
    576		filp_close(cfile, NULL);
    577
    578		return err;
    579	}
    580
    581	ns->pages = vmalloc(array_size(sizeof(union ns_mem), ns->geom.pgnum));
    582	if (!ns->pages) {
    583		NS_ERR("alloc_device: unable to allocate page array\n");
    584		return -ENOMEM;
    585	}
    586	for (i = 0; i < ns->geom.pgnum; i++) {
    587		ns->pages[i].byte = NULL;
    588	}
    589	ns->nand_pages_slab = kmem_cache_create("nandsim",
    590						ns->geom.pgszoob, 0, 0, NULL);
    591	if (!ns->nand_pages_slab) {
    592		NS_ERR("cache_create: unable to create kmem_cache\n");
    593		err = -ENOMEM;
    594		goto err_free_pg;
    595	}
    596
    597	return 0;
    598
    599err_free_pg:
    600	vfree(ns->pages);
    601
    602	return err;
    603}
    604
    605/*
    606 * Free any allocated pages, and free the array of page pointers.
    607 */
    608static void ns_free_device(struct nandsim *ns)
    609{
    610	int i;
    611
    612	if (ns->cfile) {
    613		kfree(ns->file_buf);
    614		vfree(ns->pages_written);
    615		filp_close(ns->cfile, NULL);
    616		return;
    617	}
    618
    619	if (ns->pages) {
    620		for (i = 0; i < ns->geom.pgnum; i++) {
    621			if (ns->pages[i].byte)
    622				kmem_cache_free(ns->nand_pages_slab,
    623						ns->pages[i].byte);
    624		}
    625		kmem_cache_destroy(ns->nand_pages_slab);
    626		vfree(ns->pages);
    627	}
    628}
    629
    630static char __init *ns_get_partition_name(int i)
    631{
    632	return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
    633}
    634
    635/*
    636 * Initialize the nandsim structure.
    637 *
    638 * RETURNS: 0 if success, -ERRNO if failure.
    639 */
    640static int __init ns_init(struct mtd_info *mtd)
    641{
    642	struct nand_chip *chip = mtd_to_nand(mtd);
    643	struct nandsim   *ns   = nand_get_controller_data(chip);
    644	int i, ret = 0;
    645	uint64_t remains;
    646	uint64_t next_offset;
    647
    648	if (NS_IS_INITIALIZED(ns)) {
    649		NS_ERR("init_nandsim: nandsim is already initialized\n");
    650		return -EIO;
    651	}
    652
    653	/* Initialize the NAND flash parameters */
    654	ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
    655	ns->geom.totsz    = mtd->size;
    656	ns->geom.pgsz     = mtd->writesize;
    657	ns->geom.oobsz    = mtd->oobsize;
    658	ns->geom.secsz    = mtd->erasesize;
    659	ns->geom.pgszoob  = ns->geom.pgsz + ns->geom.oobsz;
    660	ns->geom.pgnum    = div_u64(ns->geom.totsz, ns->geom.pgsz);
    661	ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
    662	ns->geom.secshift = ffs(ns->geom.secsz) - 1;
    663	ns->geom.pgshift  = chip->page_shift;
    664	ns->geom.pgsec    = ns->geom.secsz / ns->geom.pgsz;
    665	ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
    666	ns->options = 0;
    667
    668	if (ns->geom.pgsz == 512) {
    669		ns->options |= OPT_PAGE512;
    670		if (ns->busw == 8)
    671			ns->options |= OPT_PAGE512_8BIT;
    672	} else if (ns->geom.pgsz == 2048) {
    673		ns->options |= OPT_PAGE2048;
    674	} else if (ns->geom.pgsz == 4096) {
    675		ns->options |= OPT_PAGE4096;
    676	} else {
    677		NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
    678		return -EIO;
    679	}
    680
    681	if (ns->options & OPT_SMALLPAGE) {
    682		if (ns->geom.totsz <= (32 << 20)) {
    683			ns->geom.pgaddrbytes  = 3;
    684			ns->geom.secaddrbytes = 2;
    685		} else {
    686			ns->geom.pgaddrbytes  = 4;
    687			ns->geom.secaddrbytes = 3;
    688		}
    689	} else {
    690		if (ns->geom.totsz <= (128 << 20)) {
    691			ns->geom.pgaddrbytes  = 4;
    692			ns->geom.secaddrbytes = 2;
    693		} else {
    694			ns->geom.pgaddrbytes  = 5;
    695			ns->geom.secaddrbytes = 3;
    696		}
    697	}
    698
    699	/* Fill the partition_info structure */
    700	if (parts_num > ARRAY_SIZE(ns->partitions)) {
    701		NS_ERR("too many partitions.\n");
    702		return -EINVAL;
    703	}
    704	remains = ns->geom.totsz;
    705	next_offset = 0;
    706	for (i = 0; i < parts_num; ++i) {
    707		uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
    708
    709		if (!part_sz || part_sz > remains) {
    710			NS_ERR("bad partition size.\n");
    711			return -EINVAL;
    712		}
    713		ns->partitions[i].name = ns_get_partition_name(i);
    714		if (!ns->partitions[i].name) {
    715			NS_ERR("unable to allocate memory.\n");
    716			return -ENOMEM;
    717		}
    718		ns->partitions[i].offset = next_offset;
    719		ns->partitions[i].size   = part_sz;
    720		next_offset += ns->partitions[i].size;
    721		remains -= ns->partitions[i].size;
    722	}
    723	ns->nbparts = parts_num;
    724	if (remains) {
    725		if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
    726			NS_ERR("too many partitions.\n");
    727			ret = -EINVAL;
    728			goto free_partition_names;
    729		}
    730		ns->partitions[i].name = ns_get_partition_name(i);
    731		if (!ns->partitions[i].name) {
    732			NS_ERR("unable to allocate memory.\n");
    733			ret = -ENOMEM;
    734			goto free_partition_names;
    735		}
    736		ns->partitions[i].offset = next_offset;
    737		ns->partitions[i].size   = remains;
    738		ns->nbparts += 1;
    739	}
    740
    741	if (ns->busw == 16)
    742		NS_WARN("16-bit flashes support wasn't tested\n");
    743
    744	printk("flash size: %llu MiB\n",
    745			(unsigned long long)ns->geom.totsz >> 20);
    746	printk("page size: %u bytes\n",         ns->geom.pgsz);
    747	printk("OOB area size: %u bytes\n",     ns->geom.oobsz);
    748	printk("sector size: %u KiB\n",         ns->geom.secsz >> 10);
    749	printk("pages number: %u\n",            ns->geom.pgnum);
    750	printk("pages per sector: %u\n",        ns->geom.pgsec);
    751	printk("bus width: %u\n",               ns->busw);
    752	printk("bits in sector size: %u\n",     ns->geom.secshift);
    753	printk("bits in page size: %u\n",       ns->geom.pgshift);
    754	printk("bits in OOB size: %u\n",	ffs(ns->geom.oobsz) - 1);
    755	printk("flash size with OOB: %llu KiB\n",
    756			(unsigned long long)ns->geom.totszoob >> 10);
    757	printk("page address bytes: %u\n",      ns->geom.pgaddrbytes);
    758	printk("sector address bytes: %u\n",    ns->geom.secaddrbytes);
    759	printk("options: %#x\n",                ns->options);
    760
    761	ret = ns_alloc_device(ns);
    762	if (ret)
    763		goto free_partition_names;
    764
    765	/* Allocate / initialize the internal buffer */
    766	ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
    767	if (!ns->buf.byte) {
    768		NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
    769			ns->geom.pgszoob);
    770		ret = -ENOMEM;
    771		goto free_device;
    772	}
    773	memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
    774
    775	return 0;
    776
    777free_device:
    778	ns_free_device(ns);
    779free_partition_names:
    780	for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
    781		kfree(ns->partitions[i].name);
    782
    783	return ret;
    784}
    785
    786/*
    787 * Free the nandsim structure.
    788 */
    789static void ns_free(struct nandsim *ns)
    790{
    791	int i;
    792
    793	for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
    794		kfree(ns->partitions[i].name);
    795
    796	kfree(ns->buf.byte);
    797	ns_free_device(ns);
    798
    799	return;
    800}
    801
    802static int ns_parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
    803{
    804	char *w;
    805	int zero_ok;
    806	unsigned int erase_block_no;
    807	loff_t offset;
    808
    809	if (!badblocks)
    810		return 0;
    811	w = badblocks;
    812	do {
    813		zero_ok = (*w == '0' ? 1 : 0);
    814		erase_block_no = simple_strtoul(w, &w, 0);
    815		if (!zero_ok && !erase_block_no) {
    816			NS_ERR("invalid badblocks.\n");
    817			return -EINVAL;
    818		}
    819		offset = (loff_t)erase_block_no * ns->geom.secsz;
    820		if (mtd_block_markbad(mtd, offset)) {
    821			NS_ERR("invalid badblocks.\n");
    822			return -EINVAL;
    823		}
    824		if (*w == ',')
    825			w += 1;
    826	} while (*w);
    827	return 0;
    828}
    829
    830static int ns_parse_weakblocks(void)
    831{
    832	char *w;
    833	int zero_ok;
    834	unsigned int erase_block_no;
    835	unsigned int max_erases;
    836	struct weak_block *wb;
    837
    838	if (!weakblocks)
    839		return 0;
    840	w = weakblocks;
    841	do {
    842		zero_ok = (*w == '0' ? 1 : 0);
    843		erase_block_no = simple_strtoul(w, &w, 0);
    844		if (!zero_ok && !erase_block_no) {
    845			NS_ERR("invalid weakblocks.\n");
    846			return -EINVAL;
    847		}
    848		max_erases = 3;
    849		if (*w == ':') {
    850			w += 1;
    851			max_erases = simple_strtoul(w, &w, 0);
    852		}
    853		if (*w == ',')
    854			w += 1;
    855		wb = kzalloc(sizeof(*wb), GFP_KERNEL);
    856		if (!wb) {
    857			NS_ERR("unable to allocate memory.\n");
    858			return -ENOMEM;
    859		}
    860		wb->erase_block_no = erase_block_no;
    861		wb->max_erases = max_erases;
    862		list_add(&wb->list, &weak_blocks);
    863	} while (*w);
    864	return 0;
    865}
    866
    867static int ns_erase_error(unsigned int erase_block_no)
    868{
    869	struct weak_block *wb;
    870
    871	list_for_each_entry(wb, &weak_blocks, list)
    872		if (wb->erase_block_no == erase_block_no) {
    873			if (wb->erases_done >= wb->max_erases)
    874				return 1;
    875			wb->erases_done += 1;
    876			return 0;
    877		}
    878	return 0;
    879}
    880
    881static int ns_parse_weakpages(void)
    882{
    883	char *w;
    884	int zero_ok;
    885	unsigned int page_no;
    886	unsigned int max_writes;
    887	struct weak_page *wp;
    888
    889	if (!weakpages)
    890		return 0;
    891	w = weakpages;
    892	do {
    893		zero_ok = (*w == '0' ? 1 : 0);
    894		page_no = simple_strtoul(w, &w, 0);
    895		if (!zero_ok && !page_no) {
    896			NS_ERR("invalid weakpages.\n");
    897			return -EINVAL;
    898		}
    899		max_writes = 3;
    900		if (*w == ':') {
    901			w += 1;
    902			max_writes = simple_strtoul(w, &w, 0);
    903		}
    904		if (*w == ',')
    905			w += 1;
    906		wp = kzalloc(sizeof(*wp), GFP_KERNEL);
    907		if (!wp) {
    908			NS_ERR("unable to allocate memory.\n");
    909			return -ENOMEM;
    910		}
    911		wp->page_no = page_no;
    912		wp->max_writes = max_writes;
    913		list_add(&wp->list, &weak_pages);
    914	} while (*w);
    915	return 0;
    916}
    917
    918static int ns_write_error(unsigned int page_no)
    919{
    920	struct weak_page *wp;
    921
    922	list_for_each_entry(wp, &weak_pages, list)
    923		if (wp->page_no == page_no) {
    924			if (wp->writes_done >= wp->max_writes)
    925				return 1;
    926			wp->writes_done += 1;
    927			return 0;
    928		}
    929	return 0;
    930}
    931
    932static int ns_parse_gravepages(void)
    933{
    934	char *g;
    935	int zero_ok;
    936	unsigned int page_no;
    937	unsigned int max_reads;
    938	struct grave_page *gp;
    939
    940	if (!gravepages)
    941		return 0;
    942	g = gravepages;
    943	do {
    944		zero_ok = (*g == '0' ? 1 : 0);
    945		page_no = simple_strtoul(g, &g, 0);
    946		if (!zero_ok && !page_no) {
    947			NS_ERR("invalid gravepagess.\n");
    948			return -EINVAL;
    949		}
    950		max_reads = 3;
    951		if (*g == ':') {
    952			g += 1;
    953			max_reads = simple_strtoul(g, &g, 0);
    954		}
    955		if (*g == ',')
    956			g += 1;
    957		gp = kzalloc(sizeof(*gp), GFP_KERNEL);
    958		if (!gp) {
    959			NS_ERR("unable to allocate memory.\n");
    960			return -ENOMEM;
    961		}
    962		gp->page_no = page_no;
    963		gp->max_reads = max_reads;
    964		list_add(&gp->list, &grave_pages);
    965	} while (*g);
    966	return 0;
    967}
    968
    969static int ns_read_error(unsigned int page_no)
    970{
    971	struct grave_page *gp;
    972
    973	list_for_each_entry(gp, &grave_pages, list)
    974		if (gp->page_no == page_no) {
    975			if (gp->reads_done >= gp->max_reads)
    976				return 1;
    977			gp->reads_done += 1;
    978			return 0;
    979		}
    980	return 0;
    981}
    982
    983static int ns_setup_wear_reporting(struct mtd_info *mtd)
    984{
    985	wear_eb_count = div_u64(mtd->size, mtd->erasesize);
    986	erase_block_wear = kcalloc(wear_eb_count, sizeof(unsigned long), GFP_KERNEL);
    987	if (!erase_block_wear) {
    988		NS_ERR("Too many erase blocks for wear reporting\n");
    989		return -ENOMEM;
    990	}
    991	return 0;
    992}
    993
    994static void ns_update_wear(unsigned int erase_block_no)
    995{
    996	if (!erase_block_wear)
    997		return;
    998	total_wear += 1;
    999	/*
   1000	 * TODO: Notify this through a debugfs entry,
   1001	 * instead of showing an error message.
   1002	 */
   1003	if (total_wear == 0)
   1004		NS_ERR("Erase counter total overflow\n");
   1005	erase_block_wear[erase_block_no] += 1;
   1006	if (erase_block_wear[erase_block_no] == 0)
   1007		NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
   1008}
   1009
   1010/*
   1011 * Returns the string representation of 'state' state.
   1012 */
   1013static char *ns_get_state_name(uint32_t state)
   1014{
   1015	switch (NS_STATE(state)) {
   1016		case STATE_CMD_READ0:
   1017			return "STATE_CMD_READ0";
   1018		case STATE_CMD_READ1:
   1019			return "STATE_CMD_READ1";
   1020		case STATE_CMD_PAGEPROG:
   1021			return "STATE_CMD_PAGEPROG";
   1022		case STATE_CMD_READOOB:
   1023			return "STATE_CMD_READOOB";
   1024		case STATE_CMD_READSTART:
   1025			return "STATE_CMD_READSTART";
   1026		case STATE_CMD_ERASE1:
   1027			return "STATE_CMD_ERASE1";
   1028		case STATE_CMD_STATUS:
   1029			return "STATE_CMD_STATUS";
   1030		case STATE_CMD_SEQIN:
   1031			return "STATE_CMD_SEQIN";
   1032		case STATE_CMD_READID:
   1033			return "STATE_CMD_READID";
   1034		case STATE_CMD_ERASE2:
   1035			return "STATE_CMD_ERASE2";
   1036		case STATE_CMD_RESET:
   1037			return "STATE_CMD_RESET";
   1038		case STATE_CMD_RNDOUT:
   1039			return "STATE_CMD_RNDOUT";
   1040		case STATE_CMD_RNDOUTSTART:
   1041			return "STATE_CMD_RNDOUTSTART";
   1042		case STATE_ADDR_PAGE:
   1043			return "STATE_ADDR_PAGE";
   1044		case STATE_ADDR_SEC:
   1045			return "STATE_ADDR_SEC";
   1046		case STATE_ADDR_ZERO:
   1047			return "STATE_ADDR_ZERO";
   1048		case STATE_ADDR_COLUMN:
   1049			return "STATE_ADDR_COLUMN";
   1050		case STATE_DATAIN:
   1051			return "STATE_DATAIN";
   1052		case STATE_DATAOUT:
   1053			return "STATE_DATAOUT";
   1054		case STATE_DATAOUT_ID:
   1055			return "STATE_DATAOUT_ID";
   1056		case STATE_DATAOUT_STATUS:
   1057			return "STATE_DATAOUT_STATUS";
   1058		case STATE_READY:
   1059			return "STATE_READY";
   1060		case STATE_UNKNOWN:
   1061			return "STATE_UNKNOWN";
   1062	}
   1063
   1064	NS_ERR("get_state_name: unknown state, BUG\n");
   1065	return NULL;
   1066}
   1067
   1068/*
   1069 * Check if command is valid.
   1070 *
   1071 * RETURNS: 1 if wrong command, 0 if right.
   1072 */
   1073static int ns_check_command(int cmd)
   1074{
   1075	switch (cmd) {
   1076
   1077	case NAND_CMD_READ0:
   1078	case NAND_CMD_READ1:
   1079	case NAND_CMD_READSTART:
   1080	case NAND_CMD_PAGEPROG:
   1081	case NAND_CMD_READOOB:
   1082	case NAND_CMD_ERASE1:
   1083	case NAND_CMD_STATUS:
   1084	case NAND_CMD_SEQIN:
   1085	case NAND_CMD_READID:
   1086	case NAND_CMD_ERASE2:
   1087	case NAND_CMD_RESET:
   1088	case NAND_CMD_RNDOUT:
   1089	case NAND_CMD_RNDOUTSTART:
   1090		return 0;
   1091
   1092	default:
   1093		return 1;
   1094	}
   1095}
   1096
   1097/*
   1098 * Returns state after command is accepted by command number.
   1099 */
   1100static uint32_t ns_get_state_by_command(unsigned command)
   1101{
   1102	switch (command) {
   1103		case NAND_CMD_READ0:
   1104			return STATE_CMD_READ0;
   1105		case NAND_CMD_READ1:
   1106			return STATE_CMD_READ1;
   1107		case NAND_CMD_PAGEPROG:
   1108			return STATE_CMD_PAGEPROG;
   1109		case NAND_CMD_READSTART:
   1110			return STATE_CMD_READSTART;
   1111		case NAND_CMD_READOOB:
   1112			return STATE_CMD_READOOB;
   1113		case NAND_CMD_ERASE1:
   1114			return STATE_CMD_ERASE1;
   1115		case NAND_CMD_STATUS:
   1116			return STATE_CMD_STATUS;
   1117		case NAND_CMD_SEQIN:
   1118			return STATE_CMD_SEQIN;
   1119		case NAND_CMD_READID:
   1120			return STATE_CMD_READID;
   1121		case NAND_CMD_ERASE2:
   1122			return STATE_CMD_ERASE2;
   1123		case NAND_CMD_RESET:
   1124			return STATE_CMD_RESET;
   1125		case NAND_CMD_RNDOUT:
   1126			return STATE_CMD_RNDOUT;
   1127		case NAND_CMD_RNDOUTSTART:
   1128			return STATE_CMD_RNDOUTSTART;
   1129	}
   1130
   1131	NS_ERR("get_state_by_command: unknown command, BUG\n");
   1132	return 0;
   1133}
   1134
   1135/*
    1136 * Move an address byte to the corresponding internal register.
   1137 */
   1138static inline void ns_accept_addr_byte(struct nandsim *ns, u_char bt)
   1139{
   1140	uint byte = (uint)bt;
   1141
   1142	if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
   1143		ns->regs.column |= (byte << 8 * ns->regs.count);
   1144	else {
   1145		ns->regs.row |= (byte << 8 * (ns->regs.count -
   1146						ns->geom.pgaddrbytes +
   1147						ns->geom.secaddrbytes));
   1148	}
   1149
   1150	return;
   1151}
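/*
 * Example: with pgaddrbytes = 5 and secaddrbytes = 3 (a large page chip over
 * 128 MiB), the first 5 - 3 = 2 address bytes accepted above build the column
 * (byte 0 -> bits 7:0, byte 1 -> bits 15:8) and the remaining 3 bytes build
 * the row (page) address in the same least-significant-byte-first order.
 */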
   1152
   1153/*
   1154 * Switch to STATE_READY state.
   1155 */
   1156static inline void ns_switch_to_ready_state(struct nandsim *ns, u_char status)
   1157{
   1158	NS_DBG("switch_to_ready_state: switch to %s state\n",
   1159	       ns_get_state_name(STATE_READY));
   1160
   1161	ns->state       = STATE_READY;
   1162	ns->nxstate     = STATE_UNKNOWN;
   1163	ns->op          = NULL;
   1164	ns->npstates    = 0;
   1165	ns->stateidx    = 0;
   1166	ns->regs.num    = 0;
   1167	ns->regs.count  = 0;
   1168	ns->regs.off    = 0;
   1169	ns->regs.row    = 0;
   1170	ns->regs.column = 0;
   1171	ns->regs.status = status;
   1172}
   1173
   1174/*
   1175 * If the operation isn't known yet, try to find it in the global array
   1176 * of supported operations.
   1177 *
   1178 * Operation can be unknown because of the following.
   1179 *   1. New command was accepted and this is the first call to find the
   1180 *      correspondent states chain. In this case ns->npstates = 0;
   1181 *   2. There are several operations which begin with the same command(s)
   1182 *      (for example program from the second half and read from the
   1183 *      second half operations both begin with the READ1 command). In this
   1184 *      case the ns->pstates[] array contains previous states.
   1185 *
   1186 * Thus, the function tries to find operation containing the following
   1187 * states (if the 'flag' parameter is 0):
   1188 *    ns->pstates[0], ... ns->pstates[ns->npstates], ns->state
   1189 *
    1190 * If (one and only one) matching operation is found, it is accepted
    1191 * (ns->op, ns->state, ns->nxstate are initialized, ns->npstates is
    1192 * zeroed).
   1193 *
   1194 * If there are several matches, the current state is pushed to the
   1195 * ns->pstates.
   1196 *
   1197 * The operation can be unknown only while commands are input to the chip.
    1198 * As soon as address input begins, the operation must be known.
   1199 * In such situation the function is called with 'flag' != 0, and the
   1200 * operation is searched using the following pattern:
   1201 *     ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
   1202 *
   1203 * It is supposed that this pattern must either match one operation or
   1204 * none. There can't be ambiguity in that case.
   1205 *
   1206 * If no matches found, the function does the following:
   1207 *   1. if there are saved states present, try to ignore them and search
   1208 *      again only using the last command. If nothing was found, switch
   1209 *      to the STATE_READY state.
   1210 *   2. if there are no saved states, switch to the STATE_READY state.
   1211 *
   1212 * RETURNS: -2 - no matched operations found.
   1213 *          -1 - several matches.
   1214 *           0 - operation is found.
   1215 */
   1216static int ns_find_operation(struct nandsim *ns, uint32_t flag)
   1217{
   1218	int opsfound = 0;
   1219	int i, j, idx = 0;
   1220
   1221	for (i = 0; i < NS_OPER_NUM; i++) {
   1222
   1223		int found = 1;
   1224
   1225		if (!(ns->options & ops[i].reqopts))
   1226			/* Ignore operations we can't perform */
   1227			continue;
   1228
   1229		if (flag) {
   1230			if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK))
   1231				continue;
   1232		} else {
   1233			if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates]))
   1234				continue;
   1235		}
   1236
   1237		for (j = 0; j < ns->npstates; j++)
   1238			if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j])
   1239				&& (ns->options & ops[idx].reqopts)) {
   1240				found = 0;
   1241				break;
   1242			}
   1243
   1244		if (found) {
   1245			idx = i;
   1246			opsfound += 1;
   1247		}
   1248	}
   1249
   1250	if (opsfound == 1) {
   1251		/* Exact match */
   1252		ns->op = &ops[idx].states[0];
   1253		if (flag) {
   1254			/*
    1255			 * In this case the find_operation function was
    1256			 * called when address input has just begun. The address
    1257			 * isn't fully input yet, so the current state must not
    1258			 * be one of STATE_ADDR_*; instead, the STATE_ADDR_*
    1259			 * state must be the next state (ns->nxstate).
   1260			 */
   1261			ns->stateidx = ns->npstates - 1;
   1262		} else {
   1263			ns->stateidx = ns->npstates;
   1264		}
   1265		ns->npstates = 0;
   1266		ns->state = ns->op[ns->stateidx];
   1267		ns->nxstate = ns->op[ns->stateidx + 1];
   1268		NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
   1269		       idx, ns_get_state_name(ns->state),
   1270		       ns_get_state_name(ns->nxstate));
   1271		return 0;
   1272	}
   1273
   1274	if (opsfound == 0) {
   1275		/* Nothing was found. Try to ignore previous commands (if any) and search again */
   1276		if (ns->npstates != 0) {
   1277			NS_DBG("find_operation: no operation found, try again with state %s\n",
   1278			       ns_get_state_name(ns->state));
   1279			ns->npstates = 0;
   1280			return ns_find_operation(ns, 0);
   1281
   1282		}
   1283		NS_DBG("find_operation: no operations found\n");
   1284		ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
   1285		return -2;
   1286	}
   1287
   1288	if (flag) {
   1289		/* This shouldn't happen */
   1290		NS_DBG("find_operation: BUG, operation must be known if address is input\n");
   1291		return -2;
   1292	}
   1293
   1294	NS_DBG("find_operation: there is still ambiguity\n");
   1295
   1296	ns->pstates[ns->npstates++] = ns->state;
   1297
   1298	return -1;
   1299}
   1300
   1301static void ns_put_pages(struct nandsim *ns)
   1302{
   1303	int i;
   1304
   1305	for (i = 0; i < ns->held_cnt; i++)
   1306		put_page(ns->held_pages[i]);
   1307}
   1308
   1309/* Get page cache pages in advance to provide NOFS memory allocation */
   1310static int ns_get_pages(struct nandsim *ns, struct file *file, size_t count,
   1311			loff_t pos)
   1312{
   1313	pgoff_t index, start_index, end_index;
   1314	struct page *page;
   1315	struct address_space *mapping = file->f_mapping;
   1316
   1317	start_index = pos >> PAGE_SHIFT;
   1318	end_index = (pos + count - 1) >> PAGE_SHIFT;
   1319	if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
   1320		return -EINVAL;
   1321	ns->held_cnt = 0;
   1322	for (index = start_index; index <= end_index; index++) {
   1323		page = find_get_page(mapping, index);
   1324		if (page == NULL) {
   1325			page = find_or_create_page(mapping, index, GFP_NOFS);
   1326			if (page == NULL) {
   1327				write_inode_now(mapping->host, 1);
   1328				page = find_or_create_page(mapping, index, GFP_NOFS);
   1329			}
   1330			if (page == NULL) {
   1331				ns_put_pages(ns);
   1332				return -ENOMEM;
   1333			}
   1334			unlock_page(page);
   1335		}
   1336		ns->held_pages[ns->held_cnt++] = page;
   1337	}
   1338	return 0;
   1339}
   1340
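/*
 * ns_read_file()/ns_write_file() pin the relevant page cache pages via
 * ns_get_pages() first and then do the actual kernel_read()/kernel_write()
 * with reclaim disabled, so the I/O does not have to allocate memory in a
 * way that could recurse back into a filesystem sitting on top of this MTD.
 */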
   1341static ssize_t ns_read_file(struct nandsim *ns, struct file *file, void *buf,
   1342			    size_t count, loff_t pos)
   1343{
   1344	ssize_t tx;
   1345	int err;
   1346	unsigned int noreclaim_flag;
   1347
   1348	err = ns_get_pages(ns, file, count, pos);
   1349	if (err)
   1350		return err;
   1351	noreclaim_flag = memalloc_noreclaim_save();
   1352	tx = kernel_read(file, buf, count, &pos);
   1353	memalloc_noreclaim_restore(noreclaim_flag);
   1354	ns_put_pages(ns);
   1355	return tx;
   1356}
   1357
   1358static ssize_t ns_write_file(struct nandsim *ns, struct file *file, void *buf,
   1359			     size_t count, loff_t pos)
   1360{
   1361	ssize_t tx;
   1362	int err;
   1363	unsigned int noreclaim_flag;
   1364
   1365	err = ns_get_pages(ns, file, count, pos);
   1366	if (err)
   1367		return err;
   1368	noreclaim_flag = memalloc_noreclaim_save();
   1369	tx = kernel_write(file, buf, count, &pos);
   1370	memalloc_noreclaim_restore(noreclaim_flag);
   1371	ns_put_pages(ns);
   1372	return tx;
   1373}
   1374
   1375/*
   1376 * Returns a pointer to the current page.
   1377 */
   1378static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
   1379{
   1380	return &(ns->pages[ns->regs.row]);
   1381}
   1382
   1383/*
    1384 * Returns a pointer to the current byte within the current page.
   1385 */
   1386static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
   1387{
   1388	return NS_GET_PAGE(ns)->byte + NS_PAGE_BYTE_SHIFT(ns);
   1389}
   1390
   1391static int ns_do_read_error(struct nandsim *ns, int num)
   1392{
   1393	unsigned int page_no = ns->regs.row;
   1394
   1395	if (ns_read_error(page_no)) {
   1396		prandom_bytes(ns->buf.byte, num);
   1397		NS_WARN("simulating read error in page %u\n", page_no);
   1398		return 1;
   1399	}
   1400	return 0;
   1401}
   1402
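/*
 * Inject random bit flips into the internal buffer: the prandom_u32() check
 * below fires with probability 2^22 / 2^32, i.e. on roughly one read in 1024,
 * and then flips between 1 and 'bitflips' randomly chosen bits.
 */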
   1403static void ns_do_bit_flips(struct nandsim *ns, int num)
   1404{
   1405	if (bitflips && prandom_u32() < (1 << 22)) {
   1406		int flips = 1;
   1407		if (bitflips > 1)
   1408			flips = (prandom_u32() % (int) bitflips) + 1;
   1409		while (flips--) {
   1410			int pos = prandom_u32() % (num * 8);
   1411			ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
   1412			NS_WARN("read_page: flipping bit %d in page %d "
   1413				"reading from %d ecc: corrected=%u failed=%u\n",
   1414				pos, ns->regs.row, NS_PAGE_BYTE_SHIFT(ns),
   1415				nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
   1416		}
   1417	}
   1418}
   1419
   1420/*
   1421 * Fill the NAND buffer with data read from the specified page.
   1422 */
   1423static void ns_read_page(struct nandsim *ns, int num)
   1424{
   1425	union ns_mem *mypage;
   1426
   1427	if (ns->cfile) {
   1428		if (!test_bit(ns->regs.row, ns->pages_written)) {
   1429			NS_DBG("read_page: page %d not written\n", ns->regs.row);
   1430			memset(ns->buf.byte, 0xFF, num);
   1431		} else {
   1432			loff_t pos;
   1433			ssize_t tx;
   1434
   1435			NS_DBG("read_page: page %d written, reading from %d\n",
   1436				ns->regs.row, NS_PAGE_BYTE_SHIFT(ns));
   1437			if (ns_do_read_error(ns, num))
   1438				return;
   1439			pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
   1440			tx = ns_read_file(ns, ns->cfile, ns->buf.byte, num,
   1441					  pos);
   1442			if (tx != num) {
   1443				NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
   1444				return;
   1445			}
   1446			ns_do_bit_flips(ns, num);
   1447		}
   1448		return;
   1449	}
   1450
   1451	mypage = NS_GET_PAGE(ns);
   1452	if (mypage->byte == NULL) {
   1453		NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
   1454		memset(ns->buf.byte, 0xFF, num);
   1455	} else {
   1456		NS_DBG("read_page: page %d allocated, reading from %d\n",
   1457			ns->regs.row, NS_PAGE_BYTE_SHIFT(ns));
   1458		if (ns_do_read_error(ns, num))
   1459			return;
   1460		memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
   1461		ns_do_bit_flips(ns, num);
   1462	}
   1463}
   1464
   1465/*
   1466 * Erase all pages in the specified sector.
   1467 */
   1468static void ns_erase_sector(struct nandsim *ns)
   1469{
   1470	union ns_mem *mypage;
   1471	int i;
   1472
   1473	if (ns->cfile) {
   1474		for (i = 0; i < ns->geom.pgsec; i++)
   1475			if (__test_and_clear_bit(ns->regs.row + i,
   1476						 ns->pages_written)) {
   1477				NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
   1478			}
   1479		return;
   1480	}
   1481
   1482	mypage = NS_GET_PAGE(ns);
   1483	for (i = 0; i < ns->geom.pgsec; i++) {
   1484		if (mypage->byte != NULL) {
   1485			NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
   1486			kmem_cache_free(ns->nand_pages_slab, mypage->byte);
   1487			mypage->byte = NULL;
   1488		}
   1489		mypage++;
   1490	}
   1491}
   1492
   1493/*
   1494 * Program the specified page with the contents from the NAND buffer.
   1495 */
   1496static int ns_prog_page(struct nandsim *ns, int num)
   1497{
   1498	int i;
   1499	union ns_mem *mypage;
   1500	u_char *pg_off;
   1501
   1502	if (ns->cfile) {
   1503		loff_t off;
   1504		ssize_t tx;
   1505		int all;
   1506
   1507		NS_DBG("prog_page: writing page %d\n", ns->regs.row);
   1508		pg_off = ns->file_buf + NS_PAGE_BYTE_SHIFT(ns);
   1509		off = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
   1510		if (!test_bit(ns->regs.row, ns->pages_written)) {
   1511			all = 1;
   1512			memset(ns->file_buf, 0xff, ns->geom.pgszoob);
   1513		} else {
   1514			all = 0;
   1515			tx = ns_read_file(ns, ns->cfile, pg_off, num, off);
   1516			if (tx != num) {
   1517				NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
   1518				return -1;
   1519			}
   1520		}
   1521		for (i = 0; i < num; i++)
   1522			pg_off[i] &= ns->buf.byte[i];
   1523		if (all) {
   1524			loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
   1525			tx = ns_write_file(ns, ns->cfile, ns->file_buf,
   1526					   ns->geom.pgszoob, pos);
   1527			if (tx != ns->geom.pgszoob) {
   1528				NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
   1529				return -1;
   1530			}
   1531			__set_bit(ns->regs.row, ns->pages_written);
   1532		} else {
   1533			tx = ns_write_file(ns, ns->cfile, pg_off, num, off);
   1534			if (tx != num) {
   1535				NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
   1536				return -1;
   1537			}
   1538		}
   1539		return 0;
   1540	}
   1541
   1542	mypage = NS_GET_PAGE(ns);
   1543	if (mypage->byte == NULL) {
   1544		NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
   1545		/*
   1546		 * Allocate with GFP_NOFS because a flash FS may be using this
   1547		 * path. If the FS holds a lock and then gets here, a normal
   1548		 * allocation could trigger writeback, which re-enters the FS
   1549		 * and deadlocks. This has been seen in practice.
   1550		 */
   1551		mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
   1552		if (mypage->byte == NULL) {
   1553			NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
   1554			return -1;
   1555		}
   1556		memset(mypage->byte, 0xFF, ns->geom.pgszoob);
   1557	}
   1558
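       	/* Programming can only flip bits from 1 to 0, hence the bitwise AND below. */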
   1559	pg_off = NS_PAGE_BYTE_OFF(ns);
   1560	for (i = 0; i < num; i++)
   1561		pg_off[i] &= ns->buf.byte[i];
   1562
   1563	return 0;
   1564}
   1565
   1566/*
   1567 * If the state has an action bit set, perform that action.
   1568 *
   1569 * RETURNS: 0 on success, -1 on error.
   1570 */
   1571static int ns_do_state_action(struct nandsim *ns, uint32_t action)
   1572{
   1573	int num;
   1574	int busdiv = ns->busw == 8 ? 1 : 2;
   1575	unsigned int erase_block_no, page_no;
   1576
   1577	action &= ACTION_MASK;
   1578
   1579	/* Check that page address input is correct */
   1580	if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) {
   1581		NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row);
   1582		return -1;
   1583	}
   1584
   1585	switch (action) {
   1586
   1587	case ACTION_CPY:
   1588		/*
   1589		 * Copy page data to the internal buffer.
   1590		 */
   1591
   1592		/* The column (plus the current offset) must stay within the page, OOB included */
   1593		if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) {
   1594			NS_ERR("do_state_action: column number is too large\n");
   1595			break;
   1596		}
   1597		num = ns->geom.pgszoob - NS_PAGE_BYTE_SHIFT(ns);
   1598		ns_read_page(ns, num);
   1599
   1600		NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
   1601			num, NS_RAW_OFFSET(ns) + ns->regs.off);
   1602
   1603		if (ns->regs.off == 0)
   1604			NS_LOG("read page %d\n", ns->regs.row);
   1605		else if (ns->regs.off < ns->geom.pgsz)
   1606			NS_LOG("read page %d (second half)\n", ns->regs.row);
   1607		else
   1608			NS_LOG("read OOB of page %d\n", ns->regs.row);
   1609
   1610		NS_UDELAY(access_delay);
   1611		NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv);
   1612
   1613		break;
   1614
   1615	case ACTION_SECERASE:
   1616		/*
   1617		 * Erase sector.
   1618		 */
   1619
   1620		if (ns->lines.wp) {
   1621			NS_ERR("do_state_action: device is write-protected, ignore sector erase\n");
   1622			return -1;
   1623		}
   1624
   1625		if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec
   1626			|| (ns->regs.row & ~(ns->geom.secsz - 1))) {
   1627			NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row);
   1628			return -1;
   1629		}
   1630
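       		/*
       		 * Only the row-address bytes were input for the erase; the shared
       		 * address accumulator put their leading bytes into the column
       		 * register, so reassemble the full page number here before
       		 * deriving the erase-block index from it.
       		 */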
   1631		ns->regs.row = (ns->regs.row <<
   1632				8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
   1633		ns->regs.column = 0;
   1634
   1635		erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift);
   1636
   1637		NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
   1638				ns->regs.row, NS_RAW_OFFSET(ns));
   1639		NS_LOG("erase sector %u\n", erase_block_no);
   1640
   1641		ns_erase_sector(ns);
   1642
   1643		NS_MDELAY(erase_delay);
   1644
   1645		if (erase_block_wear)
   1646			ns_update_wear(erase_block_no);
   1647
   1648		if (ns_erase_error(erase_block_no)) {
   1649			NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
   1650			return -1;
   1651		}
   1652
   1653		break;
   1654
   1655	case ACTION_PRGPAGE:
   1656		/*
   1657		 * Program page - move internal buffer data to the page.
   1658		 */
   1659
   1660		if (ns->lines.wp) {
   1661			NS_WARN("do_state_action: device is write-protected, ignore program page\n");
   1662			return -1;
   1663		}
   1664
   1665		num = ns->geom.pgszoob - NS_PAGE_BYTE_SHIFT(ns);
   1666		if (num != ns->regs.count) {
   1667			NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
   1668					ns->regs.count, num);
   1669			return -1;
   1670		}
   1671
   1672		if (ns_prog_page(ns, num) == -1)
   1673			return -1;
   1674
   1675		page_no = ns->regs.row;
   1676
   1677		NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
   1678			num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
   1679		NS_LOG("program page %d\n", ns->regs.row);
   1680
   1681		NS_UDELAY(programm_delay);
   1682		NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
   1683
   1684		if (ns_write_error(page_no)) {
   1685			NS_WARN("simulating write failure in page %u\n", page_no);
   1686			return -1;
   1687		}
   1688
   1689		break;
   1690
   1691	case ACTION_ZEROOFF:
   1692		NS_DBG("do_state_action: set internal offset to 0\n");
   1693		ns->regs.off = 0;
   1694		break;
   1695
   1696	case ACTION_HALFOFF:
   1697		if (!(ns->options & OPT_PAGE512_8BIT)) {
   1698			NS_ERR("do_state_action: BUG! can't skip half of page for non-512"
   1699				" byte page size 8x chips\n");
   1700			return -1;
   1701		}
   1702		NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2);
   1703		ns->regs.off = ns->geom.pgsz/2;
   1704		break;
   1705
   1706	case ACTION_OOBOFF:
   1707		NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz);
   1708		ns->regs.off = ns->geom.pgsz;
   1709		break;
   1710
   1711	default:
   1712		NS_DBG("do_state_action: BUG! unknown action\n");
   1713	}
   1714
   1715	return 0;
   1716}
   1717
   1718/*
   1719 * Switch simulator's state.
   1720 */
   1721static void ns_switch_state(struct nandsim *ns)
   1722{
   1723	if (ns->op) {
   1724		/*
   1725		 * The current operation has already been identified.
   1726		 * Just follow the state chain.
   1727		 */
   1728
   1729		ns->stateidx += 1;
   1730		ns->state = ns->nxstate;
   1731		ns->nxstate = ns->op[ns->stateidx + 1];
   1732
   1733		NS_DBG("switch_state: operation is known, switch to the next state, "
   1734			"state: %s, nxstate: %s\n",
   1735		       ns_get_state_name(ns->state),
   1736		       ns_get_state_name(ns->nxstate));
   1737	} else {
   1738		/*
   1739		 * We don't yet know which operation we are performing.
   1740		 * Try to identify it.
   1741		 */
   1742
   1743		/*
   1744		 * The only event that causes switch_state to be called with a
   1745		 * yet-unknown operation is a new command.
   1746		 */
   1747		ns->state = ns_get_state_by_command(ns->regs.command);
   1748
   1749		NS_DBG("switch_state: operation is unknown, try to find it\n");
   1750
   1751		if (ns_find_operation(ns, 0))
   1752			return;
   1753	}
   1754
   1755	/* See whether we need to perform some action */
   1756	if ((ns->state & ACTION_MASK) &&
   1757	    ns_do_state_action(ns, ns->state) < 0) {
   1758		ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
   1759		return;
   1760	}
   1761
   1762	/* For 16x devices column means the page offset in words */
   1763	if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) {
   1764		NS_DBG("switch_state: double the column number for 16x device\n");
   1765		ns->regs.column <<= 1;
   1766	}
   1767
   1768	if (NS_STATE(ns->nxstate) == STATE_READY) {
   1769		/*
   1770		 * The current state is the last. Return to STATE_READY
   1771		 */
   1772
   1773		u_char status = NS_STATUS_OK(ns);
   1774
   1775		/* In case of data states, see if all bytes were input/output */
   1776		if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK))
   1777			&& ns->regs.count != ns->regs.num) {
   1778			NS_WARN("switch_state: not all bytes were processed, %d left\n",
   1779					ns->regs.num - ns->regs.count);
   1780			status = NS_STATUS_FAILED(ns);
   1781		}
   1782
   1783		NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
   1784
   1785		ns_switch_to_ready_state(ns, status);
   1786
   1787		return;
   1788	} else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
   1789		/*
   1790		 * If the next state is data input/output, switch to it now
   1791		 */
   1792
   1793		ns->state      = ns->nxstate;
   1794		ns->nxstate    = ns->op[++ns->stateidx + 1];
   1795		ns->regs.num   = ns->regs.count = 0;
   1796
   1797		NS_DBG("switch_state: the next state is data I/O, switch, "
   1798			"state: %s, nxstate: %s\n",
   1799		       ns_get_state_name(ns->state),
   1800		       ns_get_state_name(ns->nxstate));
   1801
   1802		/*
   1803		 * Set the internal register to the count of bytes which
   1804		 * are expected to be input or output
   1805		 */
   1806		switch (NS_STATE(ns->state)) {
   1807			case STATE_DATAIN:
   1808			case STATE_DATAOUT:
   1809				ns->regs.num = ns->geom.pgszoob - NS_PAGE_BYTE_SHIFT(ns);
   1810				break;
   1811
   1812			case STATE_DATAOUT_ID:
   1813				ns->regs.num = ns->geom.idbytes;
   1814				break;
   1815
   1816			case STATE_DATAOUT_STATUS:
   1817				ns->regs.count = ns->regs.num = 0;
   1818				break;
   1819
   1820			default:
   1821				NS_ERR("switch_state: BUG! unknown data state\n");
   1822		}
   1823
   1824	} else if (ns->nxstate & STATE_ADDR_MASK) {
   1825		/*
   1826		 * If the next state is address input, set the internal
   1827		 * register to the number of expected address bytes
   1828		 */
   1829
   1830		ns->regs.count = 0;
   1831
   1832		switch (NS_STATE(ns->nxstate)) {
   1833			case STATE_ADDR_PAGE:
   1834				ns->regs.num = ns->geom.pgaddrbytes;
   1835
   1836				break;
   1837			case STATE_ADDR_SEC:
   1838				ns->regs.num = ns->geom.secaddrbytes;
   1839				break;
   1840
   1841			case STATE_ADDR_ZERO:
   1842				ns->regs.num = 1;
   1843				break;
   1844
   1845			case STATE_ADDR_COLUMN:
   1846				/* Column address is always 2 bytes */
   1847				ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes;
   1848				break;
   1849
   1850			default:
   1851				NS_ERR("switch_state: BUG! unknown address state\n");
   1852		}
   1853	} else {
   1854		/*
   1855		 * Just reset internal counters.
   1856		 */
   1857
   1858		ns->regs.num = 0;
   1859		ns->regs.count = 0;
   1860	}
   1861}
   1862
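       /*
        * Emulate a single data-output cycle: depending on the current state this
        * returns the next byte of page data, an ID byte, or the status register.
        */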
   1863static u_char ns_nand_read_byte(struct nand_chip *chip)
   1864{
   1865	struct nandsim *ns = nand_get_controller_data(chip);
   1866	u_char outb = 0x00;
   1867
   1868	/* Sanity and correctness checks */
   1869	if (!ns->lines.ce) {
   1870		NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb);
   1871		return outb;
   1872	}
   1873	if (ns->lines.ale || ns->lines.cle) {
   1874		NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb);
   1875		return outb;
   1876	}
   1877	if (!(ns->state & STATE_DATAOUT_MASK)) {
   1878		NS_WARN("read_byte: unexpected data output cycle, state is %s, return %#x\n",
   1879			ns_get_state_name(ns->state), (uint)outb);
   1880		return outb;
   1881	}
   1882
   1883	/* The status register may be read any number of times */
   1884	if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) {
   1885		NS_DBG("read_byte: return %#x status\n", ns->regs.status);
   1886		return ns->regs.status;
   1887	}
   1888
   1889	/* Check if there is any data in the internal buffer which may be read */
   1890	if (ns->regs.count == ns->regs.num) {
   1891		NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb);
   1892		return outb;
   1893	}
   1894
   1895	switch (NS_STATE(ns->state)) {
   1896		case STATE_DATAOUT:
   1897			if (ns->busw == 8) {
   1898				outb = ns->buf.byte[ns->regs.count];
   1899				ns->regs.count += 1;
   1900			} else {
   1901				outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]);
   1902				ns->regs.count += 2;
   1903			}
   1904			break;
   1905		case STATE_DATAOUT_ID:
   1906			NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num);
   1907			outb = ns->ids[ns->regs.count];
   1908			ns->regs.count += 1;
   1909			break;
   1910		default:
   1911			BUG();
   1912	}
   1913
   1914	if (ns->regs.count == ns->regs.num) {
   1915		NS_DBG("read_byte: all bytes were read\n");
   1916
   1917		if (NS_STATE(ns->nxstate) == STATE_READY)
   1918			ns_switch_state(ns);
   1919	}
   1920
   1921	return outb;
   1922}
   1923
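       /*
        * Emulate a single input cycle. The CLE/ALE lines determine whether the
        * byte is treated as a command, an address byte, or input data.
        */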
   1924static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
   1925{
   1926	struct nandsim *ns = nand_get_controller_data(chip);
   1927
   1928	/* Sanity and correctness checks */
   1929	if (!ns->lines.ce) {
   1930		NS_ERR("write_byte: chip is disabled, ignore write\n");
   1931		return;
   1932	}
   1933	if (ns->lines.ale && ns->lines.cle) {
   1934		NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n");
   1935		return;
   1936	}
   1937
   1938	if (ns->lines.cle == 1) {
   1939		/*
   1940		 * The byte written is a command.
   1941		 */
   1942
   1943		if (byte == NAND_CMD_RESET) {
   1944			NS_LOG("reset chip\n");
   1945			ns_switch_to_ready_state(ns, NS_STATUS_OK(ns));
   1946			return;
   1947		}
   1948
   1949		/* Check that the command byte is correct */
   1950		if (ns_check_command(byte)) {
   1951			NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
   1952			return;
   1953		}
   1954
   1955		if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
   1956			|| NS_STATE(ns->state) == STATE_DATAOUT) {
   1957			int row = ns->regs.row;
   1958
   1959			ns_switch_state(ns);
   1960			if (byte == NAND_CMD_RNDOUT)
   1961				ns->regs.row = row;
   1962		}
   1963
   1964		/* Check if chip is expecting command */
   1965		if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
   1966			/* Do not warn if only 2 id bytes are read */
   1967			if (!(ns->regs.command == NAND_CMD_READID &&
   1968			    NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
   1969				/*
   1970				 * We are in a situation where something other than a command
   1971				 * was expected, but a command was input. In this case, ignore
   1972				 * the previous command(s)/state(s) and accept the last one.
   1973				 */
   1974				NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, ignore previous states\n",
   1975					(uint)byte,
   1976					ns_get_state_name(ns->nxstate));
   1977			}
   1978			ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
   1979		}
   1980
   1981		NS_DBG("command byte corresponding to %s state accepted\n",
   1982			ns_get_state_name(ns_get_state_by_command(byte)));
   1983		ns->regs.command = byte;
   1984		ns_switch_state(ns);
   1985
   1986	} else if (ns->lines.ale == 1) {
   1987		/*
   1988		 * The byte written is an address.
   1989		 */
   1990
   1991		if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) {
   1992
   1993			NS_DBG("write_byte: operation isn't known yet, identify it\n");
   1994
   1995			if (ns_find_operation(ns, 1) < 0)
   1996				return;
   1997
   1998			if ((ns->state & ACTION_MASK) &&
   1999			    ns_do_state_action(ns, ns->state) < 0) {
   2000				ns_switch_to_ready_state(ns,
   2001							 NS_STATUS_FAILED(ns));
   2002				return;
   2003			}
   2004
   2005			ns->regs.count = 0;
   2006			switch (NS_STATE(ns->nxstate)) {
   2007				case STATE_ADDR_PAGE:
   2008					ns->regs.num = ns->geom.pgaddrbytes;
   2009					break;
   2010				case STATE_ADDR_SEC:
   2011					ns->regs.num = ns->geom.secaddrbytes;
   2012					break;
   2013				case STATE_ADDR_ZERO:
   2014					ns->regs.num = 1;
   2015					break;
   2016				default:
   2017					BUG();
   2018			}
   2019		}
   2020
   2021		/* Check that chip is expecting address */
   2022		if (!(ns->nxstate & STATE_ADDR_MASK)) {
   2023			NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, switch to STATE_READY\n",
   2024			       (uint)byte, ns_get_state_name(ns->nxstate));
   2025			ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
   2026			return;
   2027		}
   2028
   2029		/* Check that another address byte is still expected */
   2030		if (ns->regs.count == ns->regs.num) {
   2031			NS_ERR("write_byte: no more address bytes expected\n");
   2032			ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
   2033			return;
   2034		}
   2035
   2036		ns_accept_addr_byte(ns, byte);
   2037
   2038		ns->regs.count += 1;
   2039
   2040		NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n",
   2041				(uint)byte, ns->regs.count, ns->regs.num);
   2042
   2043		if (ns->regs.count == ns->regs.num) {
   2044			NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
   2045			ns_switch_state(ns);
   2046		}
   2047
   2048	} else {
   2049		/*
   2050		 * The byte written is input data.
   2051		 */
   2052
   2053		/* Check that chip is expecting data input */
   2054		if (!(ns->state & STATE_DATAIN_MASK)) {
   2055			NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, switch to %s\n",
   2056			       (uint)byte, ns_get_state_name(ns->state),
   2057			       ns_get_state_name(STATE_READY));
   2058			ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
   2059			return;
   2060		}
   2061
   2062		/* Check that more input bytes are still expected */
   2063		if (ns->regs.count == ns->regs.num) {
   2064			NS_WARN("write_byte: %u input bytes have already been accepted, ignore write\n",
   2065					ns->regs.num);
   2066			return;
   2067		}
   2068
   2069		if (ns->busw == 8) {
   2070			ns->buf.byte[ns->regs.count] = byte;
   2071			ns->regs.count += 1;
   2072		} else {
   2073			ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte);
   2074			ns->regs.count += 2;
   2075		}
   2076	}
   2077
   2078	return;
   2079}
   2080
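       /*
        * Accept a buffer of input data during a data-input state and store it
        * in the internal page buffer.
        */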
   2081static void ns_nand_write_buf(struct nand_chip *chip, const u_char *buf,
   2082			      int len)
   2083{
   2084	struct nandsim *ns = nand_get_controller_data(chip);
   2085
   2086	/* Check that chip is expecting data input */
   2087	if (!(ns->state & STATE_DATAIN_MASK)) {
   2088		NS_ERR("write_buf: data input isn't expected, state is %s, switch to STATE_READY\n",
   2089		       ns_get_state_name(ns->state));
   2090		ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
   2091		return;
   2092	}
   2093
   2094	/* Check if these are expected bytes */
   2095	if (ns->regs.count + len > ns->regs.num) {
   2096		NS_ERR("write_buf: too many input bytes\n");
   2097		ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
   2098		return;
   2099	}
   2100
   2101	memcpy(ns->buf.byte + ns->regs.count, buf, len);
   2102	ns->regs.count += len;
   2103
   2104	if (ns->regs.count == ns->regs.num) {
   2105		NS_DBG("write_buf: %d bytes were written\n", ns->regs.count);
   2106	}
   2107}
   2108
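       /*
        * Output a buffer of data. Plain page data is copied straight from the
        * internal buffer; other output states fall back to byte-wise reads.
        */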
   2109static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
   2110{
   2111	struct nandsim *ns = nand_get_controller_data(chip);
   2112
   2113	/* Sanity and correctness checks */
   2114	if (!ns->lines.ce) {
   2115		NS_ERR("read_buf: chip is disabled\n");
   2116		return;
   2117	}
   2118	if (ns->lines.ale || ns->lines.cle) {
   2119		NS_ERR("read_buf: ALE or CLE pin is high\n");
   2120		return;
   2121	}
   2122	if (!(ns->state & STATE_DATAOUT_MASK)) {
   2123		NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
   2124			ns_get_state_name(ns->state));
   2125		return;
   2126	}
   2127
   2128	if (NS_STATE(ns->state) != STATE_DATAOUT) {
   2129		int i;
   2130
   2131		for (i = 0; i < len; i++)
   2132			buf[i] = ns_nand_read_byte(chip);
   2133
   2134		return;
   2135	}
   2136
   2137	/* Check if these are expected bytes */
   2138	if (ns->regs.count + len > ns->regs.num) {
   2139		NS_ERR("read_buf: too many bytes to read\n");
   2140		ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
   2141		return;
   2142	}
   2143
   2144	memcpy(buf, ns->buf.byte + ns->regs.count, len);
   2145	ns->regs.count += len;
   2146
   2147	if (ns->regs.count == ns->regs.num) {
   2148		if (NS_STATE(ns->nxstate) == STATE_READY)
   2149			ns_switch_state(ns);
   2150	}
   2151
   2152	return;
   2153}
   2154
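       /*
        * ->exec_op() hook: replay the generic NAND instruction stream as the
        * simulator's CE/CLE/ALE line toggles and byte/buffer accesses.
        */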
   2155static int ns_exec_op(struct nand_chip *chip, const struct nand_operation *op,
   2156		      bool check_only)
   2157{
   2158	int i;
   2159	unsigned int op_id;
   2160	const struct nand_op_instr *instr = NULL;
   2161	struct nandsim *ns = nand_get_controller_data(chip);
   2162
   2163	if (check_only)
   2164		return 0;
   2165
   2166	ns->lines.ce = 1;
   2167
   2168	for (op_id = 0; op_id < op->ninstrs; op_id++) {
   2169		instr = &op->instrs[op_id];
   2170		ns->lines.cle = 0;
   2171		ns->lines.ale = 0;
   2172
   2173		switch (instr->type) {
   2174		case NAND_OP_CMD_INSTR:
   2175			ns->lines.cle = 1;
   2176			ns_nand_write_byte(chip, instr->ctx.cmd.opcode);
   2177			break;
   2178		case NAND_OP_ADDR_INSTR:
   2179			ns->lines.ale = 1;
   2180			for (i = 0; i < instr->ctx.addr.naddrs; i++)
   2181				ns_nand_write_byte(chip, instr->ctx.addr.addrs[i]);
   2182			break;
   2183		case NAND_OP_DATA_IN_INSTR:
   2184			ns_nand_read_buf(chip, instr->ctx.data.buf.in, instr->ctx.data.len);
   2185			break;
   2186		case NAND_OP_DATA_OUT_INSTR:
   2187			ns_nand_write_buf(chip, instr->ctx.data.buf.out, instr->ctx.data.len);
   2188			break;
   2189		case NAND_OP_WAITRDY_INSTR:
   2190			/* we are always ready */
   2191			break;
   2192		}
   2193	}
   2194
   2195	return 0;
   2196}
   2197
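       /*
        * ->attach_chip() hook: select software Hamming or BCH ECC and, for BCH,
        * check that the requested strength fits into the OOB area.
        */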
   2198static int ns_attach_chip(struct nand_chip *chip)
   2199{
   2200	unsigned int eccsteps, eccbytes;
   2201
   2202	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
   2203	chip->ecc.algo = bch ? NAND_ECC_ALGO_BCH : NAND_ECC_ALGO_HAMMING;
   2204
   2205	if (!bch)
   2206		return 0;
   2207
   2208	if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
   2209		NS_ERR("BCH ECC support is disabled\n");
   2210		return -EINVAL;
   2211	}
   2212
   2213	/* Use 512-byte ecc blocks */
   2214	eccsteps = nsmtd->writesize / 512;
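       	/* Each bit of BCH strength needs 13 ECC bits per 512-byte step, rounded up to whole bytes. */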
   2215	eccbytes = ((bch * 13) + 7) / 8;
   2216
   2217	/* Do not bother supporting small page devices */
   2218	if (nsmtd->oobsize < 64 || !eccsteps) {
   2219		NS_ERR("BCH not available on small page devices\n");
   2220		return -EINVAL;
   2221	}
   2222
   2223	if (((eccbytes * eccsteps) + 2) > nsmtd->oobsize) {
   2224		NS_ERR("Invalid BCH value %u\n", bch);
   2225		return -EINVAL;
   2226	}
   2227
   2228	chip->ecc.size = 512;
   2229	chip->ecc.strength = bch;
   2230	chip->ecc.bytes = eccbytes;
   2231
   2232	NS_INFO("Using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
   2233
   2234	return 0;
   2235}
   2236
   2237static const struct nand_controller_ops ns_controller_ops = {
   2238	.attach_chip = ns_attach_chip,
   2239	.exec_op = ns_exec_op,
   2240};
   2241
   2242/*
   2243 * Module initialization function
   2244 */
   2245static int __init ns_init_module(void)
   2246{
   2247	struct list_head *pos, *n;
   2248	struct nand_chip *chip;
   2249	struct nandsim *ns;
   2250	int ret;
   2251
   2252	if (bus_width != 8 && bus_width != 16) {
   2253		NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
   2254		return -EINVAL;
   2255	}
   2256
   2257	ns = kzalloc(sizeof(struct nandsim), GFP_KERNEL);
   2258	if (!ns) {
   2259		NS_ERR("unable to allocate core structures.\n");
   2260		return -ENOMEM;
   2261	}
   2262	chip	    = &ns->chip;
   2263	nsmtd       = nand_to_mtd(chip);
   2264	nand_set_controller_data(chip, (void *)ns);
   2265
   2266	/* The NAND_SKIP_BBTSCAN option is necessary for the 'overridesize' */
   2267	/* and 'badblocks' parameters to work. */
   2268	chip->options   |= NAND_SKIP_BBTSCAN;
   2269
   2270	switch (bbt) {
   2271	case 2:
   2272		chip->bbt_options |= NAND_BBT_NO_OOB;
   2273		fallthrough;
   2274	case 1:
   2275		chip->bbt_options |= NAND_BBT_USE_FLASH;
   2276		fallthrough;
   2277	case 0:
   2278		break;
   2279	default:
   2280		NS_ERR("bbt has to be 0..2\n");
   2281		ret = -EINVAL;
   2282		goto free_ns_struct;
   2283	}
   2284	/*
   2285	 * Perform the minimal nandsim structure initialization needed to
   2286	 * handle the initial ID read command correctly.
   2287	 */
   2288	if (id_bytes[6] != 0xFF || id_bytes[7] != 0xFF)
   2289		ns->geom.idbytes = 8;
   2290	else if (id_bytes[4] != 0xFF || id_bytes[5] != 0xFF)
   2291		ns->geom.idbytes = 6;
   2292	else if (id_bytes[2] != 0xFF || id_bytes[3] != 0xFF)
   2293		ns->geom.idbytes = 4;
   2294	else
   2295		ns->geom.idbytes = 2;
   2296	ns->regs.status = NS_STATUS_OK(ns);
   2297	ns->nxstate = STATE_UNKNOWN;
   2298	ns->options |= OPT_PAGE512; /* temporary value */
   2299	memcpy(ns->ids, id_bytes, sizeof(ns->ids));
   2300	if (bus_width == 16) {
   2301		ns->busw = 16;
   2302		chip->options |= NAND_BUSWIDTH_16;
   2303	}
   2304
   2305	nsmtd->owner = THIS_MODULE;
   2306
   2307	ret = ns_parse_weakblocks();
   2308	if (ret)
   2309		goto free_ns_struct;
   2310
   2311	ret = ns_parse_weakpages();
   2312	if (ret)
   2313		goto free_wb_list;
   2314
   2315	ret = ns_parse_gravepages();
   2316	if (ret)
   2317		goto free_wp_list;
   2318
   2319	nand_controller_init(&ns->base);
   2320	ns->base.ops = &ns_controller_ops;
   2321	chip->controller = &ns->base;
   2322
   2323	ret = nand_scan(chip, 1);
   2324	if (ret) {
   2325		NS_ERR("Could not scan NAND Simulator device\n");
   2326		goto free_gp_list;
   2327	}
   2328
   2329	if (overridesize) {
   2330		uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
   2331		struct nand_memory_organization *memorg;
   2332		u64 targetsize;
   2333
   2334		memorg = nanddev_get_memorg(&chip->base);
   2335
   2336		if (new_size >> overridesize != nsmtd->erasesize) {
   2337			NS_ERR("overridesize is too big\n");
   2338			ret = -EINVAL;
   2339			goto cleanup_nand;
   2340		}
   2341
   2342		/* N.B. This relies on nand_scan not doing anything with the size before we change it */
   2343		nsmtd->size = new_size;
   2344		memorg->eraseblocks_per_lun = 1 << overridesize;
   2345		targetsize = nanddev_target_size(&chip->base);
   2346		chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
   2347		chip->pagemask = (targetsize >> chip->page_shift) - 1;
   2348	}
   2349
   2350	ret = ns_setup_wear_reporting(nsmtd);
   2351	if (ret)
   2352		goto cleanup_nand;
   2353
   2354	ret = ns_init(nsmtd);
   2355	if (ret)
   2356		goto free_ebw;
   2357
   2358	ret = nand_create_bbt(chip);
   2359	if (ret)
   2360		goto free_ns_object;
   2361
   2362	ret = ns_parse_badblocks(ns, nsmtd);
   2363	if (ret)
   2364		goto free_ns_object;
   2365
   2366	/* Register NAND partitions */
   2367	ret = mtd_device_register(nsmtd, &ns->partitions[0], ns->nbparts);
   2368	if (ret)
   2369		goto free_ns_object;
   2370
   2371	ret = ns_debugfs_create(ns);
   2372	if (ret)
   2373		goto unregister_mtd;
   2374
   2375	return 0;
   2376
   2377unregister_mtd:
   2378	WARN_ON(mtd_device_unregister(nsmtd));
   2379free_ns_object:
   2380	ns_free(ns);
   2381free_ebw:
   2382	kfree(erase_block_wear);
   2383cleanup_nand:
   2384	nand_cleanup(chip);
   2385free_gp_list:
   2386	list_for_each_safe(pos, n, &grave_pages) {
   2387		list_del(pos);
   2388		kfree(list_entry(pos, struct grave_page, list));
   2389	}
   2390free_wp_list:
   2391	list_for_each_safe(pos, n, &weak_pages) {
   2392		list_del(pos);
   2393		kfree(list_entry(pos, struct weak_page, list));
   2394	}
   2395free_wb_list:
   2396	list_for_each_safe(pos, n, &weak_blocks) {
   2397		list_del(pos);
   2398		kfree(list_entry(pos, struct weak_block, list));
   2399	}
   2400free_ns_struct:
   2401	kfree(ns);
   2402
   2403	return ret;
   2404}
   2405
   2406module_init(ns_init_module);
   2407
   2408/*
   2409 * Module clean-up function
   2410 */
   2411static void __exit ns_cleanup_module(void)
   2412{
   2413	struct nand_chip *chip = mtd_to_nand(nsmtd);
   2414	struct nandsim *ns = nand_get_controller_data(chip);
   2415	struct list_head *pos, *n;
   2416
   2417	ns_debugfs_remove(ns);
   2418	WARN_ON(mtd_device_unregister(nsmtd));
   2419	ns_free(ns);
   2420	kfree(erase_block_wear);
   2421	nand_cleanup(chip);
   2422
   2423	list_for_each_safe(pos, n, &grave_pages) {
   2424		list_del(pos);
   2425		kfree(list_entry(pos, struct grave_page, list));
   2426	}
   2427
   2428	list_for_each_safe(pos, n, &weak_pages) {
   2429		list_del(pos);
   2430		kfree(list_entry(pos, struct weak_page, list));
   2431	}
   2432
   2433	list_for_each_safe(pos, n, &weak_blocks) {
   2434		list_del(pos);
   2435		kfree(list_entry(pos, struct weak_block, list));
   2436	}
   2437
   2438	kfree(ns);
   2439}
   2440
   2441module_exit(ns_cleanup_module);
   2442
   2443MODULE_LICENSE("GPL");
   2444MODULE_AUTHOR("Artem B. Bityuckiy");
   2445MODULE_DESCRIPTION("The NAND flash simulator");