cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cfi_cmdset_0002.c (85692B)


      1/*
      2 * Common Flash Interface support:
      3 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
      4 *
      5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
      6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
      7 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
      8 *
      9 * 2_by_8 routines added by Simon Munton
     10 *
     11 * 4_by_16 work by Carolyn J. Smith
     12 *
     13 * XIP support hooks by Vitaly Wool (based on code for Intel flash
     14 * by Nicolas Pitre)
     15 *
     16 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
     17 *
     18 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
     19 *
     20 * This code is GPL
     21 */
     22
     23#include <linux/module.h>
     24#include <linux/types.h>
     25#include <linux/kernel.h>
     26#include <linux/sched.h>
     27#include <asm/io.h>
     28#include <asm/byteorder.h>
     29
     30#include <linux/errno.h>
     31#include <linux/slab.h>
     32#include <linux/delay.h>
     33#include <linux/interrupt.h>
     34#include <linux/reboot.h>
     35#include <linux/of.h>
     36#include <linux/of_platform.h>
     37#include <linux/mtd/map.h>
     38#include <linux/mtd/mtd.h>
     39#include <linux/mtd/cfi.h>
     40#include <linux/mtd/xip.h>
     41
     42#define AMD_BOOTLOC_BUG
     43#define FORCE_WORD_WRITE 0
     44
     45#define MAX_RETRIES 3
     46
     47#define SST49LF004B		0x0060
     48#define SST49LF040B		0x0050
     49#define SST49LF008A		0x005a
     50#define AT49BV6416		0x00d6
     51#define S29GL064N_MN12		0x0c01
     52
     53/*
     54 * Status Register bit description. Used by flash devices that don't
     55 * support DQ polling (e.g. HyperFlash)
     56 */
     57#define CFI_SR_DRB		BIT(7)
     58#define CFI_SR_ESB		BIT(5)
     59#define CFI_SR_PSB		BIT(4)
     60#define CFI_SR_WBASB		BIT(3)
     61#define CFI_SR_SLSB		BIT(1)
     62
     63enum cfi_quirks {
     64	CFI_QUIRK_DQ_TRUE_DATA = BIT(0),
     65};
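/*
 * Annotation (not part of the original file): a minimal sketch of how the
 * status-register masks above combine.  DRB must be set (device ready)
 * before the error bits mean anything; ESB/PSB then flag a failed
 * erase/program.  This mirrors cfi_check_err_status() below for the
 * non-interleaved case; the helper name is hypothetical.
 */
static inline int cfi_sr_op_failed(unsigned long status)
{
	if (!(status & CFI_SR_DRB))
		return 0;	/* busy: error bits not yet valid */
	return !!(status & (CFI_SR_ESB | CFI_SR_PSB));
}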
     66
     67static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
     68static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
     69#if !FORCE_WORD_WRITE
     70static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
     71#endif
     72static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
     73static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
     74static void cfi_amdstd_sync (struct mtd_info *);
     75static int cfi_amdstd_suspend (struct mtd_info *);
     76static void cfi_amdstd_resume (struct mtd_info *);
     77static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
     78static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
     79					 size_t *, struct otp_info *);
     80static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
     81					 size_t *, struct otp_info *);
     82static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
     83static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
     84					 size_t *, u_char *);
     85static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
     86					 size_t *, u_char *);
     87static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
     88					  size_t *, const u_char *);
     89static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);
     90
     91static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
     92				  size_t *retlen, const u_char *buf);
     93
     94static void cfi_amdstd_destroy(struct mtd_info *);
     95
     96struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
     97static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
     98
     99static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
    100static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
    101#include "fwh_lock.h"
    102
    103static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
    104static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
    105
    106static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
    107static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
    108static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
    109
    110static struct mtd_chip_driver cfi_amdstd_chipdrv = {
    111	.probe		= NULL, /* Not usable directly */
    112	.destroy	= cfi_amdstd_destroy,
    113	.name		= "cfi_cmdset_0002",
    114	.module		= THIS_MODULE
    115};
    116
    117/*
     118 * Use the status register to poll for erase/write completion when DQ
     119 * polling is not supported, as indicated by Bit[1:0] of the SoftwareFeatures
     120 * field in the CFI Primary Vendor-Specific Extended Query table, version 1.5.
    121 */
    122static int cfi_use_status_reg(struct cfi_private *cfi)
    123{
    124	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
    125	u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
    126
    127	return extp && extp->MinorVersion >= '5' &&
    128		(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
    129}
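/*
 * Annotation (not in the original): the expression above accepts only parts
 * whose SoftwareFeatures advertise status-register polling *without* DQ
 * polling - masking with both flags and comparing against
 * CFI_POLL_STATUS_REG alone rejects chips that support DQ (or neither), so
 * classic toggle-bit polling stays the default.
 */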
    130
    131static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
    132				unsigned long adr)
    133{
    134	struct cfi_private *cfi = map->fldrv_priv;
    135	map_word status;
    136
    137	if (!cfi_use_status_reg(cfi))
    138		return 0;
    139
    140	cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
    141			 cfi->device_type, NULL);
    142	status = map_read(map, adr);
    143
    144	/* The error bits are invalid while the chip's busy */
    145	if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
    146		return 0;
    147
    148	if (map_word_bitsset(map, status, CMD(0x3a))) {
    149		unsigned long chipstatus = MERGESTATUS(status);
    150
    151		if (chipstatus & CFI_SR_ESB)
    152			pr_err("%s erase operation failed, status %lx\n",
    153			       map->name, chipstatus);
    154		if (chipstatus & CFI_SR_PSB)
    155			pr_err("%s program operation failed, status %lx\n",
    156			       map->name, chipstatus);
    157		if (chipstatus & CFI_SR_WBASB)
    158			pr_err("%s buffer program command aborted, status %lx\n",
    159			       map->name, chipstatus);
    160		if (chipstatus & CFI_SR_SLSB)
    161			pr_err("%s sector write protected, status %lx\n",
    162			       map->name, chipstatus);
    163
     164		/* Erase/Program status bits are set when the operation fails */
    165		if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
    166			return 1;
    167	}
    168	return 0;
    169}
    170
    171/* #define DEBUG_CFI_FEATURES */
    172
    173
    174#ifdef DEBUG_CFI_FEATURES
    175static void cfi_tell_features(struct cfi_pri_amdstd *extp)
    176{
    177	const char* erase_suspend[3] = {
    178		"Not supported", "Read only", "Read/write"
    179	};
    180	const char* top_bottom[6] = {
    181		"No WP", "8x8KiB sectors at top & bottom, no WP",
    182		"Bottom boot", "Top boot",
    183		"Uniform, Bottom WP", "Uniform, Top WP"
    184	};
    185
    186	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
    187	printk("  Address sensitive unlock: %s\n",
    188	       (extp->SiliconRevision & 1) ? "Not required" : "Required");
    189
    190	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
    191		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
    192	else
    193		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
    194
    195	if (extp->BlkProt == 0)
    196		printk("  Block protection: Not supported\n");
    197	else
    198		printk("  Block protection: %d sectors per group\n", extp->BlkProt);
    199
    200
    201	printk("  Temporary block unprotect: %s\n",
    202	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
    203	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
    204	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
    205	printk("  Burst mode: %s\n",
    206	       extp->BurstMode ? "Supported" : "Not supported");
    207	if (extp->PageMode == 0)
    208		printk("  Page mode: Not supported\n");
    209	else
    210		printk("  Page mode: %d word page\n", extp->PageMode << 2);
    211
    212	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
    213	       extp->VppMin >> 4, extp->VppMin & 0xf);
    214	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
    215	       extp->VppMax >> 4, extp->VppMax & 0xf);
    216
    217	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
    218		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
    219	else
    220		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
    221}
    222#endif
    223
    224#ifdef AMD_BOOTLOC_BUG
    225/* Wheee. Bring me the head of someone at AMD. */
    226static void fixup_amd_bootblock(struct mtd_info *mtd)
    227{
    228	struct map_info *map = mtd->priv;
    229	struct cfi_private *cfi = map->fldrv_priv;
    230	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
    231	__u8 major = extp->MajorVersion;
    232	__u8 minor = extp->MinorVersion;
    233
    234	if (((major << 8) | minor) < 0x3131) {
    235		/* CFI version 1.0 => don't trust bootloc */
    236
    237		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
    238			map->name, cfi->mfr, cfi->id);
    239
    240		/* AFAICS all 29LV400 with a bottom boot block have a device ID
    241		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
    242		 * These were badly detected as they have the 0x80 bit set
    243		 * so treat them as a special case.
    244		 */
    245		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
    246
    247			/* Macronix added CFI to their 2nd generation
    248			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
    249			 * Fujitsu, Spansion, EON, ESI and older Macronix)
    250			 * has CFI.
    251			 *
    252			 * Therefore also check the manufacturer.
    253			 * This reduces the risk of false detection due to
    254			 * the 8-bit device ID.
    255			 */
    256			(cfi->mfr == CFI_MFR_MACRONIX)) {
    257			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
    258				" detected\n", map->name);
    259			extp->TopBottom = 2;	/* bottom boot */
    260		} else
    261		if (cfi->id & 0x80) {
    262			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
    263			extp->TopBottom = 3;	/* top boot */
    264		} else {
    265			extp->TopBottom = 2;	/* bottom boot */
    266		}
    267
    268		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
    269			" deduced %s from Device ID\n", map->name, major, minor,
    270			extp->TopBottom == 2 ? "bottom" : "top");
    271	}
    272}
    273#endif
    274
    275#if !FORCE_WORD_WRITE
    276static void fixup_use_write_buffers(struct mtd_info *mtd)
    277{
    278	struct map_info *map = mtd->priv;
    279	struct cfi_private *cfi = map->fldrv_priv;
    280
    281	if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x2201)
    282		return;
    283
    284	if (cfi->cfiq->BufWriteTimeoutTyp) {
    285		pr_debug("Using buffer write method\n");
    286		mtd->_write = cfi_amdstd_write_buffers;
    287	}
    288}
    289#endif /* !FORCE_WORD_WRITE */
    290
    291/* Atmel chips don't use the same PRI format as AMD chips */
    292static void fixup_convert_atmel_pri(struct mtd_info *mtd)
    293{
    294	struct map_info *map = mtd->priv;
    295	struct cfi_private *cfi = map->fldrv_priv;
    296	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
    297	struct cfi_pri_atmel atmel_pri;
    298
    299	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
    300	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
    301
    302	if (atmel_pri.Features & 0x02)
    303		extp->EraseSuspend = 2;
    304
    305	/* Some chips got it backwards... */
    306	if (cfi->id == AT49BV6416) {
    307		if (atmel_pri.BottomBoot)
    308			extp->TopBottom = 3;
    309		else
    310			extp->TopBottom = 2;
    311	} else {
    312		if (atmel_pri.BottomBoot)
    313			extp->TopBottom = 2;
    314		else
    315			extp->TopBottom = 3;
    316	}
    317
    318	/* burst write mode not supported */
    319	cfi->cfiq->BufWriteTimeoutTyp = 0;
    320	cfi->cfiq->BufWriteTimeoutMax = 0;
    321}
    322
    323static void fixup_use_secsi(struct mtd_info *mtd)
    324{
    325	/* Setup for chips with a secsi area */
    326	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
    327	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
    328}
    329
    330static void fixup_use_erase_chip(struct mtd_info *mtd)
    331{
    332	struct map_info *map = mtd->priv;
    333	struct cfi_private *cfi = map->fldrv_priv;
    334	if ((cfi->cfiq->NumEraseRegions == 1) &&
    335		((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
    336		mtd->_erase = cfi_amdstd_erase_chip;
    337	}
    338
    339}
    340
    341/*
    342 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
    343 * locked by default.
    344 */
    345static void fixup_use_atmel_lock(struct mtd_info *mtd)
    346{
    347	mtd->_lock = cfi_atmel_lock;
    348	mtd->_unlock = cfi_atmel_unlock;
    349	mtd->flags |= MTD_POWERUP_LOCK;
    350}
    351
    352static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
    353{
    354	struct map_info *map = mtd->priv;
    355	struct cfi_private *cfi = map->fldrv_priv;
    356
    357	/*
    358	 * These flashes report two separate eraseblock regions based on the
    359	 * sector_erase-size and block_erase-size, although they both operate on the
    360	 * same memory. This is not allowed according to CFI, so we just pick the
    361	 * sector_erase-size.
    362	 */
    363	cfi->cfiq->NumEraseRegions = 1;
    364}
    365
    366static void fixup_sst39vf(struct mtd_info *mtd)
    367{
    368	struct map_info *map = mtd->priv;
    369	struct cfi_private *cfi = map->fldrv_priv;
    370
    371	fixup_old_sst_eraseregion(mtd);
    372
    373	cfi->addr_unlock1 = 0x5555;
    374	cfi->addr_unlock2 = 0x2AAA;
    375}
    376
    377static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
    378{
    379	struct map_info *map = mtd->priv;
    380	struct cfi_private *cfi = map->fldrv_priv;
    381
    382	fixup_old_sst_eraseregion(mtd);
    383
    384	cfi->addr_unlock1 = 0x555;
    385	cfi->addr_unlock2 = 0x2AA;
    386
    387	cfi->sector_erase_cmd = CMD(0x50);
    388}
    389
    390static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
    391{
    392	struct map_info *map = mtd->priv;
    393	struct cfi_private *cfi = map->fldrv_priv;
    394
    395	fixup_sst39vf_rev_b(mtd);
    396
    397	/*
    398	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
    399	 * it should report a size of 8KBytes (0x0020*256).
    400	 */
    401	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
    402	pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
    403		mtd->name);
    404}
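/*
 * Annotation (not in the original): EraseRegionInfo packs two fields -
 * bits 31:16 hold the erase block size in units of 256 bytes and bits 15:0
 * hold the block count minus one (see the decode in cfi_amdstd_setup()
 * below).  The 0x002003ff value above thus reads as 0x0020 * 256 = 8 KiB
 * blocks and 0x03ff + 1 = 1024 blocks.
 */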
    405
    406static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
    407{
    408	struct map_info *map = mtd->priv;
    409	struct cfi_private *cfi = map->fldrv_priv;
    410
    411	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
    412		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
    413		pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
    414			mtd->name);
    415	}
    416}
    417
    418static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
    419{
    420	struct map_info *map = mtd->priv;
    421	struct cfi_private *cfi = map->fldrv_priv;
    422
    423	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
    424		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
    425		pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
    426			mtd->name);
    427	}
    428}
    429
    430static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
    431{
    432	struct map_info *map = mtd->priv;
    433	struct cfi_private *cfi = map->fldrv_priv;
    434
    435	/*
     436	 * S29NS512P flash uses more than 8 bits to report the number of
     437	 * sectors, which is not permitted by CFI.
    438	 */
    439	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
    440	pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
    441		mtd->name);
    442}
    443
    444static void fixup_quirks(struct mtd_info *mtd)
    445{
    446	struct map_info *map = mtd->priv;
    447	struct cfi_private *cfi = map->fldrv_priv;
    448
    449	if (cfi->mfr == CFI_MFR_AMD && cfi->id == S29GL064N_MN12)
    450		cfi->quirks |= CFI_QUIRK_DQ_TRUE_DATA;
    451}
    452
    453/* Used to fix CFI-Tables of chips without Extended Query Tables */
    454static struct cfi_fixup cfi_nopri_fixup_table[] = {
    455	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
    456	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
    457	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
    458	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
    459	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
    460	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
    461	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
    462	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
    463	{ 0, 0, NULL }
    464};
    465
    466static struct cfi_fixup cfi_fixup_table[] = {
    467	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
    468#ifdef AMD_BOOTLOC_BUG
    469	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
    470	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
    471	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
    472#endif
    473	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
    474	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
    475	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
    476	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
    477	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
    478	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
    479	{ CFI_MFR_AMD, S29GL064N_MN12, fixup_s29gl064n_sectors },
    480	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
    481	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
    482	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
    483	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
    484	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
    485	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
    486	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
    487	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
    488#if !FORCE_WORD_WRITE
    489	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
    490#endif
    491	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_quirks },
    492	{ 0, 0, NULL }
    493};
    494static struct cfi_fixup jedec_fixup_table[] = {
    495	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
    496	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
    497	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
    498	{ 0, 0, NULL }
    499};
    500
    501static struct cfi_fixup fixup_table[] = {
     502	/* The CFI vendor IDs and the JEDEC vendor IDs appear
     503	 * to be common.  It is likely that the device IDs are
     504	 * as well.  This table picks up all cases where we
     505	 * know that is the case.
     506	 */
    507	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
    508	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
    509	{ 0, 0, NULL }
    510};
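/*
 * Annotation (not in the original): the tables above are consumed by the
 * generic cfi_fixup() helper, which lives in cfi_util.c rather than this
 * file.  A sketch of its behaviour, assuming the upstream semantics: walk
 * entries until the NULL-terminated sentinel, calling every fixup whose
 * manufacturer and device IDs match, with CFI_MFR_ANY/CFI_ID_ANY acting
 * as wildcards.  Shown as a comment to avoid redefining the real symbol:
 *
 *	void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
 *	{
 *		struct map_info *map = mtd->priv;
 *		struct cfi_private *cfi = map->fldrv_priv;
 *		struct cfi_fixup *f;
 *
 *		for (f = fixups; f->fixup; f++) {
 *			if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *			    (f->id == CFI_ID_ANY || f->id == cfi->id))
 *				f->fixup(mtd);
 *		}
 *	}
 */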
    511
    512
    513static void cfi_fixup_major_minor(struct cfi_private *cfi,
    514				  struct cfi_pri_amdstd *extp)
    515{
    516	if (cfi->mfr == CFI_MFR_SAMSUNG) {
    517		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
    518		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
    519			/*
    520			 * Samsung K8P2815UQB and K8D6x16UxM chips
    521			 * report major=0 / minor=0.
    522			 * K8D3x16UxC chips report major=3 / minor=3.
    523			 */
    524			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
    525			       " Extended Query version to 1.%c\n",
    526			       extp->MinorVersion);
    527			extp->MajorVersion = '1';
    528		}
    529	}
    530
    531	/*
    532	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
    533	 */
    534	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
    535		extp->MajorVersion = '1';
    536		extp->MinorVersion = '0';
    537	}
    538}
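/*
 * Annotation (not in the original): the (cfi->id >> 4) == 0x0536 test above
 * matches the whole SST 38VF640x family in one shot - e.g.
 * 0x536a >> 4 == 0x0536 - i.e. the same IDs (0x536a..0x536d) listed
 * individually in cfi_fixup_table.
 */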
    539
    540static int is_m29ew(struct cfi_private *cfi)
    541{
    542	if (cfi->mfr == CFI_MFR_INTEL &&
    543	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
    544	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
    545		return 1;
    546	return 0;
    547}
    548
    549/*
    550 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
    551 * Some revisions of the M29EW suffer from erase suspend hang ups. In
    552 * particular, it can occur when the sequence
    553 * Erase Confirm -> Suspend -> Program -> Resume
    554 * causes a lockup due to internal timing issues. The consequence is that the
    555 * erase cannot be resumed without inserting a dummy command after programming
    556 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
    557 * that writes an F0 command code before the RESUME command.
    558 */
    559static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
    560					  unsigned long adr)
    561{
    562	struct cfi_private *cfi = map->fldrv_priv;
    563	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
    564	if (is_m29ew(cfi))
    565		map_write(map, CMD(0xF0), adr);
    566}
    567
    568/*
    569 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
    570 *
    571 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
    572 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
    573 * command is issued after an ERASE RESUME operation without waiting for a
    574 * minimum delay.  The result is that once the ERASE seems to be completed
    575 * (no bits are toggling), the contents of the Flash memory block on which
    576 * the erase was ongoing could be inconsistent with the expected values
    577 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
     578 * (typically, the array value is stuck at 0xC0, 0xC4, 0x80, or 0x84
    579 * The occurrence of this issue could be high, especially when file system
    580 * operations on the Flash are intensive.  As a result, it is recommended
    581 * that a patch be applied.  Intensive file system operations can cause many
    582 * calls to the garbage routine to free Flash space (also by erasing physical
    583 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
    584 * commands can occur.  The problem disappears when a delay is inserted after
    585 * the RESUME command by using the udelay() function available in Linux.
    586 * The DELAY value must be tuned based on the customer's platform.
    587 * The maximum value that fixes the problem in all cases is 500us.
    588 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
    589 * in most cases.
    590 * We have chosen 500µs because this latency is acceptable.
    591 */
    592static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
    593{
    594	/*
    595	 * Resolving the Delay After Resume Issue see Micron TN-13-07
    596	 * Worst case delay must be 500µs but 30-50µs should be ok as well
    597	 */
    598	if (is_m29ew(cfi))
    599		cfi_udelay(500);
    600}
    601
    602struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
    603{
    604	struct cfi_private *cfi = map->fldrv_priv;
    605	struct device_node __maybe_unused *np = map->device_node;
    606	struct mtd_info *mtd;
    607	int i;
    608
    609	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
    610	if (!mtd)
    611		return NULL;
    612	mtd->priv = map;
    613	mtd->type = MTD_NORFLASH;
    614
    615	/* Fill in the default mtd operations */
    616	mtd->_erase   = cfi_amdstd_erase_varsize;
    617	mtd->_write   = cfi_amdstd_write_words;
    618	mtd->_read    = cfi_amdstd_read;
    619	mtd->_sync    = cfi_amdstd_sync;
    620	mtd->_suspend = cfi_amdstd_suspend;
    621	mtd->_resume  = cfi_amdstd_resume;
    622	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
    623	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
    624	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
    625	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
    626	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
    627	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
    628	mtd->flags   = MTD_CAP_NORFLASH;
    629	mtd->name    = map->name;
    630	mtd->writesize = 1;
    631	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
    632
    633	pr_debug("MTD %s(): write buffer size %d\n", __func__,
    634			mtd->writebufsize);
    635
    636	mtd->_panic_write = cfi_amdstd_panic_write;
    637	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
    638
    639	if (cfi->cfi_mode==CFI_MODE_CFI){
    640		unsigned char bootloc;
    641		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
    642		struct cfi_pri_amdstd *extp;
    643
    644		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
    645		if (extp) {
    646			/*
    647			 * It's a real CFI chip, not one for which the probe
    648			 * routine faked a CFI structure.
    649			 */
    650			cfi_fixup_major_minor(cfi, extp);
    651
    652			/*
    653			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
    654			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19 
    655			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
    656			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
    657			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
    658			 */
    659			if (extp->MajorVersion != '1' ||
    660			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
    661				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
    662				       "version %c.%c (%#02x/%#02x).\n",
    663				       extp->MajorVersion, extp->MinorVersion,
    664				       extp->MajorVersion, extp->MinorVersion);
    665				kfree(extp);
    666				kfree(mtd);
    667				return NULL;
    668			}
    669
    670			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
    671			       extp->MajorVersion, extp->MinorVersion);
    672
    673			/* Install our own private info structure */
    674			cfi->cmdset_priv = extp;
    675
    676			/* Apply cfi device specific fixups */
    677			cfi_fixup(mtd, cfi_fixup_table);
    678
    679#ifdef DEBUG_CFI_FEATURES
    680			/* Tell the user about it in lots of lovely detail */
    681			cfi_tell_features(extp);
    682#endif
    683
    684#ifdef CONFIG_OF
    685			if (np && of_property_read_bool(
    686				    np, "use-advanced-sector-protection")
    687			    && extp->BlkProtUnprot == 8) {
    688				printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
    689				mtd->_lock = cfi_ppb_lock;
    690				mtd->_unlock = cfi_ppb_unlock;
    691				mtd->_is_locked = cfi_ppb_is_locked;
    692			}
    693#endif
    694
    695			bootloc = extp->TopBottom;
    696			if ((bootloc < 2) || (bootloc > 5)) {
    697				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
    698				       "bank location (%d). Assuming bottom.\n",
    699				       map->name, bootloc);
    700				bootloc = 2;
    701			}
    702
    703			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
    704				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
    705
    706				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
    707					int j = (cfi->cfiq->NumEraseRegions-1)-i;
    708
    709					swap(cfi->cfiq->EraseRegionInfo[i],
    710					     cfi->cfiq->EraseRegionInfo[j]);
    711				}
    712			}
    713			/* Set the default CFI lock/unlock addresses */
    714			cfi->addr_unlock1 = 0x555;
    715			cfi->addr_unlock2 = 0x2aa;
    716		}
    717		cfi_fixup(mtd, cfi_nopri_fixup_table);
    718
    719		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
    720			kfree(mtd);
    721			return NULL;
    722		}
    723
    724	} /* CFI mode */
    725	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
    726		/* Apply jedec specific fixups */
    727		cfi_fixup(mtd, jedec_fixup_table);
    728	}
    729	/* Apply generic fixups */
    730	cfi_fixup(mtd, fixup_table);
    731
    732	for (i=0; i< cfi->numchips; i++) {
    733		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
    734		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
    735		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
    736		/*
     737		 * First calculate the maximum timeout from the timeout fields
     738		 * of struct cfi_ident probed from the chip's CFI area, if
    739		 * available. Specify a minimum of 2000us, in case the CFI data
    740		 * is wrong.
    741		 */
    742		if (cfi->cfiq->BufWriteTimeoutTyp &&
    743		    cfi->cfiq->BufWriteTimeoutMax)
    744			cfi->chips[i].buffer_write_time_max =
    745				1 << (cfi->cfiq->BufWriteTimeoutTyp +
    746				      cfi->cfiq->BufWriteTimeoutMax);
    747		else
    748			cfi->chips[i].buffer_write_time_max = 0;
    749
    750		cfi->chips[i].buffer_write_time_max =
    751			max(cfi->chips[i].buffer_write_time_max, 2000);
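		/*
		 * Worked example (annotation, not in the original): CFI
		 * encodes these timeouts as powers of two, so
		 * BufWriteTimeoutTyp = 7 and BufWriteTimeoutMax = 3 give a
		 * typical 1 << 7 = 128us buffer write and a
		 * 1 << (7 + 3) = 1024us worst case, which the max() above
		 * then raises to the 2000us floor.
		 */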
    752
    753		cfi->chips[i].ref_point_counter = 0;
    754		init_waitqueue_head(&(cfi->chips[i].wq));
    755	}
    756
    757	map->fldrv = &cfi_amdstd_chipdrv;
    758
    759	return cfi_amdstd_setup(mtd);
    760}
    761struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
    762struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
    763EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
    764EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
    765EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
    766
    767static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
    768{
    769	struct map_info *map = mtd->priv;
    770	struct cfi_private *cfi = map->fldrv_priv;
    771	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
    772	unsigned long offset = 0;
    773	int i,j;
    774
    775	printk(KERN_NOTICE "number of %s chips: %d\n",
    776	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
    777	/* Select the correct geometry setup */
    778	mtd->size = devsize * cfi->numchips;
    779
    780	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
    781	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
    782					  sizeof(struct mtd_erase_region_info),
    783					  GFP_KERNEL);
    784	if (!mtd->eraseregions)
    785		goto setup_err;
    786
    787	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
    788		unsigned long ernum, ersize;
    789		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
    790		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
    791
    792		if (mtd->erasesize < ersize) {
    793			mtd->erasesize = ersize;
    794		}
    795		for (j=0; j<cfi->numchips; j++) {
    796			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
    797			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
    798			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
    799		}
    800		offset += (ersize * ernum);
    801	}
    802	if (offset != devsize) {
    803		/* Argh */
    804		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
    805		goto setup_err;
    806	}
    807
    808	__module_get(THIS_MODULE);
    809	register_reboot_notifier(&mtd->reboot_notifier);
    810	return mtd;
    811
    812 setup_err:
    813	kfree(mtd->eraseregions);
    814	kfree(mtd);
    815	kfree(cfi->cmdset_priv);
    816	return NULL;
    817}
    818
    819/*
    820 * Return true if the chip is ready and has the correct value.
    821 *
    822 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
    823 * non-suspended sector) and is indicated by no toggle bits toggling.
    824 *
     825 * Errors are indicated by bits toggling, or by bits held at the
     826 * wrong value.
    827 *
    828 * Note that anything more complicated than checking if no bits are toggling
    829 * (including checking DQ5 for an error status) is tricky to get working
     830 * correctly and is therefore not done (particularly with interleaved chips
    831 * as each chip must be checked independently of the others).
    832 */
    833static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
    834			       unsigned long addr, map_word *expected)
    835{
    836	struct cfi_private *cfi = map->fldrv_priv;
    837	map_word oldd, curd;
    838	int ret;
    839
    840	if (cfi_use_status_reg(cfi)) {
    841		map_word ready = CMD(CFI_SR_DRB);
    842		/*
    843		 * For chips that support status register, check device
    844		 * ready bit
    845		 */
    846		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
    847				 cfi->device_type, NULL);
    848		curd = map_read(map, addr);
    849
    850		return map_word_andequal(map, curd, ready, ready);
    851	}
    852
    853	oldd = map_read(map, addr);
    854	curd = map_read(map, addr);
    855
    856	ret = map_word_equal(map, oldd, curd);
    857
    858	if (!ret || !expected)
    859		return ret;
    860
    861	return map_word_equal(map, curd, *expected);
    862}
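/*
 * Annotation (not in the original): the two back-to-back map_read()s above
 * rely on the DQ6 toggle bit - while an embedded erase/program algorithm
 * is running, DQ6 flips on every read, so consecutive reads only return
 * equal data once the operation has completed and the array contents are
 * visible again.
 */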
    863
    864static int __xipram chip_good(struct map_info *map, struct flchip *chip,
    865			      unsigned long addr, map_word *expected)
    866{
    867	struct cfi_private *cfi = map->fldrv_priv;
    868	map_word *datum = expected;
    869
    870	if (cfi->quirks & CFI_QUIRK_DQ_TRUE_DATA)
    871		datum = NULL;
    872
    873	return chip_ready(map, chip, addr, datum);
    874}
    875
    876static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
    877{
    878	DECLARE_WAITQUEUE(wait, current);
    879	struct cfi_private *cfi = map->fldrv_priv;
    880	unsigned long timeo;
    881	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
    882
    883 resettime:
    884	timeo = jiffies + HZ;
    885 retry:
    886	switch (chip->state) {
    887
    888	case FL_STATUS:
    889		for (;;) {
    890			if (chip_ready(map, chip, adr, NULL))
    891				break;
    892
    893			if (time_after(jiffies, timeo)) {
    894				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
    895				return -EIO;
    896			}
    897			mutex_unlock(&chip->mutex);
    898			cfi_udelay(1);
    899			mutex_lock(&chip->mutex);
    900			/* Someone else might have been playing with it. */
    901			goto retry;
    902		}
    903		return 0;
    904
    905	case FL_READY:
    906	case FL_CFI_QUERY:
    907	case FL_JEDEC_QUERY:
    908		return 0;
    909
    910	case FL_ERASING:
    911		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
    912		    !(mode == FL_READY || mode == FL_POINT ||
    913		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
    914			goto sleep;
    915
     916		/* Do not allow suspend if reading/writing the block being erased */
    917		if ((adr & chip->in_progress_block_mask) ==
    918		    chip->in_progress_block_addr)
    919			goto sleep;
    920
    921		/* Erase suspend */
    922		/* It's harmless to issue the Erase-Suspend and Erase-Resume
    923		 * commands when the erase algorithm isn't in progress. */
    924		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
    925		chip->oldstate = FL_ERASING;
    926		chip->state = FL_ERASE_SUSPENDING;
    927		chip->erase_suspended = 1;
    928		for (;;) {
    929			if (chip_ready(map, chip, adr, NULL))
    930				break;
    931
    932			if (time_after(jiffies, timeo)) {
    933				/* Should have suspended the erase by now.
    934				 * Send an Erase-Resume command as either
    935				 * there was an error (so leave the erase
     936				 * routine to recover from it) or we were trying to
    937				 * use the erase-in-progress sector. */
    938				put_chip(map, chip, adr);
    939				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
    940				return -EIO;
    941			}
    942
    943			mutex_unlock(&chip->mutex);
    944			cfi_udelay(1);
    945			mutex_lock(&chip->mutex);
    946			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
    947			   So we can just loop here. */
    948		}
    949		chip->state = FL_READY;
    950		return 0;
    951
    952	case FL_XIP_WHILE_ERASING:
    953		if (mode != FL_READY && mode != FL_POINT &&
    954		    (!cfip || !(cfip->EraseSuspend&2)))
    955			goto sleep;
    956		chip->oldstate = chip->state;
    957		chip->state = FL_READY;
    958		return 0;
    959
    960	case FL_SHUTDOWN:
    961		/* The machine is rebooting */
    962		return -EIO;
    963
    964	case FL_POINT:
    965		/* Only if there's no operation suspended... */
    966		if (mode == FL_READY && chip->oldstate == FL_READY)
    967			return 0;
    968		fallthrough;
    969	default:
    970	sleep:
    971		set_current_state(TASK_UNINTERRUPTIBLE);
    972		add_wait_queue(&chip->wq, &wait);
    973		mutex_unlock(&chip->mutex);
    974		schedule();
    975		remove_wait_queue(&chip->wq, &wait);
    976		mutex_lock(&chip->mutex);
    977		goto resettime;
    978	}
    979}
    980
    981
    982static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
    983{
    984	struct cfi_private *cfi = map->fldrv_priv;
    985
    986	switch(chip->oldstate) {
    987	case FL_ERASING:
    988		cfi_fixup_m29ew_erase_suspend(map,
    989			chip->in_progress_block_addr);
    990		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
    991		cfi_fixup_m29ew_delay_after_resume(cfi);
    992		chip->oldstate = FL_READY;
    993		chip->state = FL_ERASING;
    994		break;
    995
    996	case FL_XIP_WHILE_ERASING:
    997		chip->state = chip->oldstate;
    998		chip->oldstate = FL_READY;
    999		break;
   1000
   1001	case FL_READY:
   1002	case FL_STATUS:
   1003		break;
   1004	default:
   1005		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
   1006	}
   1007	wake_up(&chip->wq);
   1008}
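/*
 * Annotation (not in the original): get_chip()/put_chip() bracket every
 * flash access.  For a chip caught mid-erase, the round trip is in
 * outline:
 *
 *	get_chip()  ->  map_write(map, CMD(0xB0), ...)	erase suspend
 *	... access another sector (read, or program when EraseSuspend
 *	    advertises read/write support) ...
 *	put_chip()  ->  map_write(map, cfi->sector_erase_cmd, ...)
 *	                erase resume (0x30, or 0x50 on the SST39VF rev B
 *	                parts fixed up above)
 */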
   1009
   1010#ifdef CONFIG_MTD_XIP
   1011
   1012/*
    1013 * No interrupt whatsoever can be serviced while the flash isn't in array
   1014 * mode.  This is ensured by the xip_disable() and xip_enable() functions
   1015 * enclosing any code path where the flash is known not to be in array mode.
   1016 * And within a XIP disabled code path, only functions marked with __xipram
   1017 * may be called and nothing else (it's a good thing to inspect generated
   1018 * assembly to make sure inline functions were actually inlined and that gcc
    1019 * didn't emit calls to its own support functions). Configuring MTD CFI
    1020 * support for a single buswidth and a single interleave is also recommended.
   1021 */
   1022
   1023static void xip_disable(struct map_info *map, struct flchip *chip,
   1024			unsigned long adr)
   1025{
   1026	/* TODO: chips with no XIP use should ignore and return */
   1027	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
   1028	local_irq_disable();
   1029}
   1030
   1031static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
   1032				unsigned long adr)
   1033{
   1034	struct cfi_private *cfi = map->fldrv_priv;
   1035
   1036	if (chip->state != FL_POINT && chip->state != FL_READY) {
   1037		map_write(map, CMD(0xf0), adr);
   1038		chip->state = FL_READY;
   1039	}
   1040	(void) map_read(map, adr);
   1041	xip_iprefetch();
   1042	local_irq_enable();
   1043}
   1044
   1045/*
   1046 * When a delay is required for the flash operation to complete, the
    1047 * xip_udelay() function polls for both the given timeout and pending
    1048 * (but still masked) hardware interrupts.  Whenever an interrupt is
    1049 * pending, the flash erase operation is suspended, array mode restored
   1050 * and interrupts unmasked.  Task scheduling might also happen at that
   1051 * point.  The CPU eventually returns from the interrupt or the call to
    1052 * schedule() and the suspended flash operation is resumed for the
    1053 * remainder of the delay period.
   1054 *
   1055 * Warning: this function _will_ fool interrupt latency tracing tools.
   1056 */
   1057
   1058static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
   1059				unsigned long adr, int usec)
   1060{
   1061	struct cfi_private *cfi = map->fldrv_priv;
   1062	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
   1063	map_word status, OK = CMD(0x80);
   1064	unsigned long suspended, start = xip_currtime();
   1065	flstate_t oldstate;
   1066
   1067	do {
   1068		cpu_relax();
   1069		if (xip_irqpending() && extp &&
   1070		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
   1071		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
   1072			/*
   1073			 * Let's suspend the erase operation when supported.
   1074			 * Note that we currently don't try to suspend
   1075			 * interleaved chips if there is already another
   1076			 * operation suspended (imagine what happens
   1077			 * when one chip was already done with the current
   1078			 * operation while another chip suspended it, then
   1079			 * we resume the whole thing at once).  Yes, it
   1080			 * can happen!
   1081			 */
   1082			map_write(map, CMD(0xb0), adr);
   1083			usec -= xip_elapsed_since(start);
   1084			suspended = xip_currtime();
   1085			do {
   1086				if (xip_elapsed_since(suspended) > 100000) {
   1087					/*
   1088					 * The chip doesn't want to suspend
   1089					 * after waiting for 100 msecs.
   1090					 * This is a critical error but there
   1091					 * is not much we can do here.
   1092					 */
   1093					return;
   1094				}
   1095				status = map_read(map, adr);
   1096			} while (!map_word_andequal(map, status, OK, OK));
   1097
   1098			/* Suspend succeeded */
   1099			oldstate = chip->state;
   1100			if (!map_word_bitsset(map, status, CMD(0x40)))
   1101				break;
   1102			chip->state = FL_XIP_WHILE_ERASING;
   1103			chip->erase_suspended = 1;
   1104			map_write(map, CMD(0xf0), adr);
   1105			(void) map_read(map, adr);
   1106			xip_iprefetch();
   1107			local_irq_enable();
   1108			mutex_unlock(&chip->mutex);
   1109			xip_iprefetch();
   1110			cond_resched();
   1111
   1112			/*
   1113			 * We're back.  However someone else might have
   1114			 * decided to go write to the chip if we are in
   1115			 * a suspended erase state.  If so let's wait
   1116			 * until it's done.
   1117			 */
   1118			mutex_lock(&chip->mutex);
   1119			while (chip->state != FL_XIP_WHILE_ERASING) {
   1120				DECLARE_WAITQUEUE(wait, current);
   1121				set_current_state(TASK_UNINTERRUPTIBLE);
   1122				add_wait_queue(&chip->wq, &wait);
   1123				mutex_unlock(&chip->mutex);
   1124				schedule();
   1125				remove_wait_queue(&chip->wq, &wait);
   1126				mutex_lock(&chip->mutex);
   1127			}
   1128			/* Disallow XIP again */
   1129			local_irq_disable();
   1130
   1131			/* Correct Erase Suspend Hangups for M29EW */
   1132			cfi_fixup_m29ew_erase_suspend(map, adr);
   1133			/* Resume the write or erase operation */
   1134			map_write(map, cfi->sector_erase_cmd, adr);
   1135			chip->state = oldstate;
   1136			start = xip_currtime();
   1137		} else if (usec >= 1000000/HZ) {
   1138			/*
   1139			 * Try to save on CPU power when waiting delay
   1140			 * is at least a system timer tick period.
   1141			 * No need to be extremely accurate here.
   1142			 */
   1143			xip_cpu_idle();
   1144		}
   1145		status = map_read(map, adr);
   1146	} while (!map_word_andequal(map, status, OK, OK)
   1147		 && xip_elapsed_since(start) < usec);
   1148}
   1149
   1150#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)
   1151
   1152/*
   1153 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
   1154 * the flash is actively programming or erasing since we have to poll for
   1155 * the operation to complete anyway.  We can't do that in a generic way with
   1156 * a XIP setup so do it before the actual flash operation in this case
   1157 * and stub it out from INVALIDATE_CACHE_UDELAY.
   1158 */
   1159#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
   1160	INVALIDATE_CACHED_RANGE(map, from, size)
   1161
   1162#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
   1163	UDELAY(map, chip, adr, usec)
   1164
   1165/*
   1166 * Extra notes:
   1167 *
   1168 * Activating this XIP support changes the way the code works a bit.  For
   1169 * example the code to suspend the current process when concurrent access
   1170 * happens is never executed because xip_udelay() will always return with the
    1171 * same chip state as it was entered with.  This is why no care is taken
    1172 * over the presence of add_wait_queue() or schedule() calls within a
    1173 * couple of xip_disable()'d areas of code, like in do_erase_oneblock.
   1174 * The queueing and scheduling are always happening within xip_udelay().
   1175 *
   1176 * Similarly, get_chip() and put_chip() just happen to always be executed
    1177 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) while the flash
    1178 * is in array mode, therefore never executing many cases therein and not
   1179 * causing any problem with XIP.
   1180 */
   1181
   1182#else
   1183
   1184#define xip_disable(map, chip, adr)
   1185#define xip_enable(map, chip, adr)
   1186#define XIP_INVAL_CACHED_RANGE(x...)
   1187
   1188#define UDELAY(map, chip, adr, usec)  \
   1189do {  \
   1190	mutex_unlock(&chip->mutex);  \
   1191	cfi_udelay(usec);  \
   1192	mutex_lock(&chip->mutex);  \
   1193} while (0)
   1194
   1195#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
   1196do {  \
   1197	mutex_unlock(&chip->mutex);  \
   1198	INVALIDATE_CACHED_RANGE(map, adr, len);  \
   1199	cfi_udelay(usec);  \
   1200	mutex_lock(&chip->mutex);  \
   1201} while (0)
   1202
   1203#endif
   1204
   1205static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
   1206{
   1207	unsigned long cmd_addr;
   1208	struct cfi_private *cfi = map->fldrv_priv;
   1209	int ret;
   1210
   1211	adr += chip->start;
   1212
   1213	/* Ensure cmd read/writes are aligned. */
   1214	cmd_addr = adr & ~(map_bankwidth(map)-1);
   1215
   1216	mutex_lock(&chip->mutex);
   1217	ret = get_chip(map, chip, cmd_addr, FL_READY);
   1218	if (ret) {
   1219		mutex_unlock(&chip->mutex);
   1220		return ret;
   1221	}
   1222
   1223	if (chip->state != FL_POINT && chip->state != FL_READY) {
   1224		map_write(map, CMD(0xf0), cmd_addr);
   1225		chip->state = FL_READY;
   1226	}
   1227
   1228	map_copy_from(map, buf, adr, len);
   1229
   1230	put_chip(map, chip, cmd_addr);
   1231
   1232	mutex_unlock(&chip->mutex);
   1233	return 0;
   1234}
   1235
   1236
   1237static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
   1238{
   1239	struct map_info *map = mtd->priv;
   1240	struct cfi_private *cfi = map->fldrv_priv;
   1241	unsigned long ofs;
   1242	int chipnum;
   1243	int ret = 0;
   1244
    1245	/* ofs: offset within the first chip at which the first read should start */
   1246	chipnum = (from >> cfi->chipshift);
   1247	ofs = from - (chipnum <<  cfi->chipshift);
   1248
   1249	while (len) {
   1250		unsigned long thislen;
   1251
   1252		if (chipnum >= cfi->numchips)
   1253			break;
   1254
   1255		if ((len + ofs -1) >> cfi->chipshift)
   1256			thislen = (1<<cfi->chipshift) - ofs;
   1257		else
   1258			thislen = len;
   1259
   1260		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
   1261		if (ret)
   1262			break;
   1263
   1264		*retlen += thislen;
   1265		len -= thislen;
   1266		buf += thislen;
   1267
   1268		ofs = 0;
   1269		chipnum++;
   1270	}
   1271	return ret;
   1272}
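/*
 * Worked example (annotation, not in the original): chipshift is log2 of
 * one chip's size, so for two 8 MiB chips (chipshift = 23) a read at
 * from = 0x900000 selects chipnum = 1 and ofs = 0x100000.  A read crossing
 * 0x800000 is split: the first do_read_onechip() gets
 * thislen = (1 << 23) - ofs, and the remainder continues at ofs = 0 on the
 * next chip.
 */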
   1273
   1274typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
   1275			loff_t adr, size_t len, u_char *buf, size_t grouplen);
   1276
   1277static inline void otp_enter(struct map_info *map, struct flchip *chip,
   1278			     loff_t adr, size_t len)
   1279{
   1280	struct cfi_private *cfi = map->fldrv_priv;
   1281
   1282	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
   1283			 cfi->device_type, NULL);
   1284	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
   1285			 cfi->device_type, NULL);
   1286	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
   1287			 cfi->device_type, NULL);
   1288
   1289	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
   1290}
   1291
   1292static inline void otp_exit(struct map_info *map, struct flchip *chip,
   1293			    loff_t adr, size_t len)
   1294{
   1295	struct cfi_private *cfi = map->fldrv_priv;
   1296
   1297	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
   1298			 cfi->device_type, NULL);
   1299	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
   1300			 cfi->device_type, NULL);
   1301	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
   1302			 cfi->device_type, NULL);
   1303	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
   1304			 cfi->device_type, NULL);
   1305
   1306	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
   1307}
   1308
   1309static inline int do_read_secsi_onechip(struct map_info *map,
   1310					struct flchip *chip, loff_t adr,
   1311					size_t len, u_char *buf,
   1312					size_t grouplen)
   1313{
   1314	DECLARE_WAITQUEUE(wait, current);
   1315
   1316 retry:
   1317	mutex_lock(&chip->mutex);
   1318
   1319	if (chip->state != FL_READY){
   1320		set_current_state(TASK_UNINTERRUPTIBLE);
   1321		add_wait_queue(&chip->wq, &wait);
   1322
   1323		mutex_unlock(&chip->mutex);
   1324
   1325		schedule();
   1326		remove_wait_queue(&chip->wq, &wait);
   1327
   1328		goto retry;
   1329	}
   1330
   1331	adr += chip->start;
   1332
   1333	chip->state = FL_READY;
   1334
   1335	otp_enter(map, chip, adr, len);
   1336	map_copy_from(map, buf, adr, len);
   1337	otp_exit(map, chip, adr, len);
   1338
   1339	wake_up(&chip->wq);
   1340	mutex_unlock(&chip->mutex);
   1341
   1342	return 0;
   1343}
   1344
   1345static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
   1346{
   1347	struct map_info *map = mtd->priv;
   1348	struct cfi_private *cfi = map->fldrv_priv;
   1349	unsigned long ofs;
   1350	int chipnum;
   1351	int ret = 0;
   1352
    1353	/* ofs: offset within the first chip at which the first read should start */
   1354	/* 8 secsi bytes per chip */
   1355	chipnum=from>>3;
   1356	ofs=from & 7;
   1357
   1358	while (len) {
   1359		unsigned long thislen;
   1360
   1361		if (chipnum >= cfi->numchips)
   1362			break;
   1363
   1364		if ((len + ofs -1) >> 3)
   1365			thislen = (1<<3) - ofs;
   1366		else
   1367			thislen = len;
   1368
   1369		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
   1370					    thislen, buf, 0);
   1371		if (ret)
   1372			break;
   1373
   1374		*retlen += thislen;
   1375		len -= thislen;
   1376		buf += thislen;
   1377
   1378		ofs = 0;
   1379		chipnum++;
   1380	}
   1381	return ret;
   1382}
   1383
   1384static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
   1385				     unsigned long adr, map_word datum,
   1386				     int mode);
   1387
   1388static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
   1389			size_t len, u_char *buf, size_t grouplen)
   1390{
   1391	int ret;
   1392	while (len) {
   1393		unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
   1394		int gap = adr - bus_ofs;
   1395		int n = min_t(int, len, map_bankwidth(map) - gap);
   1396		map_word datum = map_word_ff(map);
   1397
   1398		if (n != map_bankwidth(map)) {
   1399			/* partial write of a word, load old contents */
   1400			otp_enter(map, chip, bus_ofs, map_bankwidth(map));
   1401			datum = map_read(map, bus_ofs);
   1402			otp_exit(map, chip, bus_ofs, map_bankwidth(map));
   1403		}
   1404
   1405		datum = map_word_load_partial(map, datum, buf, gap, n);
   1406		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
   1407		if (ret)
   1408			return ret;
   1409
   1410		adr += n;
   1411		buf += n;
   1412		len -= n;
   1413	}
   1414
   1415	return 0;
   1416}
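/*
 * Annotation (not in the original): writes narrower than the bus width
 * become a read-modify-write - the old OTP contents are fetched and
 * map_word_load_partial() overlays only the caller's n bytes at offset
 * gap, so neighbouring bytes within the word are reprogrammed with their
 * existing values (harmless, since NOR programming only clears bits).
 * Full-width words skip the read and take the data from buf directly.
 */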
   1417
   1418static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
   1419		       size_t len, u_char *buf, size_t grouplen)
   1420{
   1421	struct cfi_private *cfi = map->fldrv_priv;
   1422	uint8_t lockreg;
   1423	unsigned long timeo;
   1424	int ret;
   1425
   1426	/* make sure area matches group boundaries */
   1427	if ((adr != 0) || (len != grouplen))
   1428		return -EINVAL;
   1429
   1430	mutex_lock(&chip->mutex);
   1431	ret = get_chip(map, chip, chip->start, FL_LOCKING);
   1432	if (ret) {
   1433		mutex_unlock(&chip->mutex);
   1434		return ret;
   1435	}
   1436	chip->state = FL_LOCKING;
   1437
   1438	/* Enter lock register command */
   1439	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
   1440			 cfi->device_type, NULL);
   1441	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
   1442			 cfi->device_type, NULL);
   1443	cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
   1444			 cfi->device_type, NULL);
   1445
   1446	/* read lock register */
   1447	lockreg = cfi_read_query(map, 0);
   1448
    1449	/* clear bit 0 to protect the extended memory block */
   1450	lockreg &= ~0x01;
   1451
   1453	/* write lock register */
   1454	map_write(map, CMD(0xA0), chip->start);
   1455	map_write(map, CMD(lockreg), chip->start);
   1456
   1457	/* wait for chip to become ready */
   1458	timeo = jiffies + msecs_to_jiffies(2);
   1459	for (;;) {
   1460		if (chip_ready(map, chip, adr, NULL))
   1461			break;
   1462
   1463		if (time_after(jiffies, timeo)) {
   1464			pr_err("Waiting for chip to be ready timed out.\n");
   1465			ret = -EIO;
   1466			break;
   1467		}
   1468		UDELAY(map, chip, 0, 1);
   1469	}
   1470
   1471	/* exit protection commands */
   1472	map_write(map, CMD(0x90), chip->start);
   1473	map_write(map, CMD(0x00), chip->start);
   1474
   1475	chip->state = FL_READY;
   1476	put_chip(map, chip, chip->start);
   1477	mutex_unlock(&chip->mutex);
   1478
   1479	return ret;
   1480}
   1481
   1482static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
   1483			       size_t *retlen, u_char *buf,
   1484			       otp_op_t action, int user_regs)
   1485{
   1486	struct map_info *map = mtd->priv;
   1487	struct cfi_private *cfi = map->fldrv_priv;
   1488	int ofs_factor = cfi->interleave * cfi->device_type;
   1489	unsigned long base;
   1490	int chipnum;
   1491	struct flchip *chip;
   1492	uint8_t otp, lockreg;
   1493	int ret;
   1494
   1495	size_t user_size, factory_size, otpsize;
   1496	loff_t user_offset, factory_offset, otpoffset;
   1497	int user_locked = 0, otplocked;
   1498
   1499	*retlen = 0;
   1500
   1501	for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
   1502		chip = &cfi->chips[chipnum];
   1503		factory_size = 0;
   1504		user_size = 0;
   1505
   1506		/* Micron M29EW family */
   1507		if (is_m29ew(cfi)) {
   1508			base = chip->start;
   1509
   1510			/* check whether secsi area is factory locked
   1511			   or user lockable */
   1512			mutex_lock(&chip->mutex);
   1513			ret = get_chip(map, chip, base, FL_CFI_QUERY);
   1514			if (ret) {
   1515				mutex_unlock(&chip->mutex);
   1516				return ret;
   1517			}
   1518			cfi_qry_mode_on(base, map, cfi);
   1519			otp = cfi_read_query(map, base + 0x3 * ofs_factor);
   1520			cfi_qry_mode_off(base, map, cfi);
   1521			put_chip(map, chip, base);
   1522			mutex_unlock(&chip->mutex);
   1523
   1524			if (otp & 0x80) {
   1525				/* factory locked */
   1526				factory_offset = 0;
   1527				factory_size = 0x100;
   1528			} else {
   1529				/* customer lockable */
   1530				user_offset = 0;
   1531				user_size = 0x100;
   1532
   1533				mutex_lock(&chip->mutex);
   1534				ret = get_chip(map, chip, base, FL_LOCKING);
   1535				if (ret) {
   1536					mutex_unlock(&chip->mutex);
   1537					return ret;
   1538				}
   1539
   1540				/* Enter lock register command */
   1541				cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
   1542						 chip->start, map, cfi,
   1543						 cfi->device_type, NULL);
   1544				cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
   1545						 chip->start, map, cfi,
   1546						 cfi->device_type, NULL);
   1547				cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
   1548						 chip->start, map, cfi,
   1549						 cfi->device_type, NULL);
   1550				/* read lock register */
   1551				lockreg = cfi_read_query(map, 0);
   1552				/* exit protection commands */
   1553				map_write(map, CMD(0x90), chip->start);
   1554				map_write(map, CMD(0x00), chip->start);
   1555				put_chip(map, chip, chip->start);
   1556				mutex_unlock(&chip->mutex);
   1557
   1558				user_locked = ((lockreg & 0x01) == 0x00);
   1559			}
   1560		}
   1561
   1562		otpsize = user_regs ? user_size : factory_size;
   1563		if (!otpsize)
   1564			continue;
   1565		otpoffset = user_regs ? user_offset : factory_offset;
   1566		otplocked = user_regs ? user_locked : 1;
   1567
   1568		if (!action) {
   1569			/* return otpinfo */
   1570			struct otp_info *otpinfo;
    1571			if (len < sizeof(*otpinfo))
    1572				return -ENOSPC;
    1573			len -= sizeof(*otpinfo);
   1574			otpinfo = (struct otp_info *)buf;
   1575			otpinfo->start = from;
   1576			otpinfo->length = otpsize;
   1577			otpinfo->locked = otplocked;
   1578			buf += sizeof(*otpinfo);
   1579			*retlen += sizeof(*otpinfo);
   1580			from += otpsize;
   1581		} else if ((from < otpsize) && (len > 0)) {
   1582			size_t size;
   1583			size = (len < otpsize - from) ? len : otpsize - from;
   1584			ret = action(map, chip, otpoffset + from, size, buf,
   1585				     otpsize);
   1586			if (ret < 0)
   1587				return ret;
   1588
   1589			buf += size;
   1590			len -= size;
   1591			*retlen += size;
   1592			from = 0;
   1593		} else {
   1594			from -= otpsize;
   1595		}
   1596	}
   1597	return 0;
   1598}
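        	/*
        	 * Example of the offset accounting above (illustrative,
        	 * assuming two chips whose user OTP regions are 0x100 bytes
        	 * each): a read with from == 0x180 takes the final
        	 * "from -= otpsize" branch on chip 0 (leaving from == 0x80) and
        	 * then reads up to 0x80 bytes from chip 1's region, starting
        	 * 0x80 bytes into it.
        	 */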
   1599
   1600static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
   1601					 size_t *retlen, struct otp_info *buf)
   1602{
   1603	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
   1604				   NULL, 0);
   1605}
   1606
   1607static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
   1608					 size_t *retlen, struct otp_info *buf)
   1609{
   1610	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
   1611				   NULL, 1);
   1612}
   1613
   1614static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
   1615					 size_t len, size_t *retlen,
   1616					 u_char *buf)
   1617{
   1618	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
   1619				   buf, do_read_secsi_onechip, 0);
   1620}
   1621
   1622static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
   1623					 size_t len, size_t *retlen,
   1624					 u_char *buf)
   1625{
   1626	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
   1627				   buf, do_read_secsi_onechip, 1);
   1628}
   1629
   1630static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
   1631					  size_t len, size_t *retlen,
   1632					  const u_char *buf)
   1633{
   1634	return cfi_amdstd_otp_walk(mtd, from, len, retlen, (u_char *)buf,
   1635				   do_otp_write, 1);
   1636}
   1637
   1638static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
   1639					 size_t len)
   1640{
   1641	size_t retlen;
   1642	return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
   1643				   do_otp_lock, 1);
   1644}
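        	/*
        	 * For orientation, a minimal userspace sketch of how the OTP
        	 * handlers above are reached through the mtdchar ioctl
        	 * interface. Hedged: /dev/mtd0 and the 0x100 region size are
        	 * assumptions, and error handling is omitted.
        	 *
        	 *	#include <fcntl.h>
        	 *	#include <unistd.h>
        	 *	#include <sys/ioctl.h>
        	 *	#include <mtd/mtd-user.h>
        	 *
        	 *	int fd = open("/dev/mtd0", O_RDWR);
        	 *	int mode = MTD_OTP_USER;
        	 *	ioctl(fd, OTPSELECT, &mode);    select the user OTP area
        	 *	char buf[16];
        	 *	read(fd, buf, sizeof(buf));     -> cfi_amdstd_read_user_prot_reg()
        	 *	struct otp_info info = { .start = 0, .length = 0x100 };
        	 *	ioctl(fd, OTPLOCK, &info);      -> cfi_amdstd_lock_user_prot_reg()
        	 */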
   1645
   1646static int __xipram do_write_oneword_once(struct map_info *map,
   1647					  struct flchip *chip,
   1648					  unsigned long adr, map_word datum,
   1649					  int mode, struct cfi_private *cfi)
   1650{
   1651	unsigned long timeo;
    1652	/*
    1653	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
    1654	 * have a max write time of a few hundred usec). However, we should
    1655	 * use the maximum timeout value given by the chip at probe time
    1656	 * instead.  Unfortunately, struct flchip does not have a field for
    1657	 * the maximum timeout, only for the typical one, which can be far
    1658	 * too short depending on the conditions.  The ' + 1' avoids a
    1659	 * timeout of 0 jiffies if HZ is smaller than 1000.
    1660	 */
   1661	unsigned long uWriteTimeout = (HZ / 1000) + 1;
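        		/*
        		 * For example: with HZ == 1000 this is 1 + 1 = 2 jiffies
        		 * (2 ms); with HZ == 100 the division truncates to 0 and the
        		 * ' + 1' keeps a minimum timeout of one 10 ms jiffy.
        		 */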
   1662	int ret = 0;
   1663
   1664	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
   1665	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
   1666	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
   1667	map_write(map, datum, adr);
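        		/*
        		 * The cycles above are the standard AMD/JEDEC single-word
        		 * program sequence: two unlock writes (0xAA/0x55, typically at
        		 * 0x555 and 0x2AA, as probed into addr_unlock1/2), the 0xA0
        		 * program command, then the data word at its final address.
        		 */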
   1668	chip->state = mode;
   1669
   1670	INVALIDATE_CACHE_UDELAY(map, chip,
   1671				adr, map_bankwidth(map),
   1672				chip->word_write_time);
   1673
   1674	/* See comment above for timeout value. */
   1675	timeo = jiffies + uWriteTimeout;
   1676	for (;;) {
   1677		if (chip->state != mode) {
   1678			/* Someone's suspended the write. Sleep */
   1679			DECLARE_WAITQUEUE(wait, current);
   1680
   1681			set_current_state(TASK_UNINTERRUPTIBLE);
   1682			add_wait_queue(&chip->wq, &wait);
   1683			mutex_unlock(&chip->mutex);
   1684			schedule();
   1685			remove_wait_queue(&chip->wq, &wait);
   1686			timeo = jiffies + (HZ / 2); /* FIXME */
   1687			mutex_lock(&chip->mutex);
   1688			continue;
   1689		}
   1690
    1691		/*
    1692		 * Test the timeout together with a final chip_good() check, so a
    1693		 * write that completed while we slept is not reported as a timeout.
    1694		 */
   1695		if (time_after(jiffies, timeo) &&
   1696		    !chip_good(map, chip, adr, &datum)) {
   1697			xip_enable(map, chip, adr);
   1698			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
   1699			xip_disable(map, chip, adr);
   1700			ret = -EIO;
   1701			break;
   1702		}
   1703
   1704		if (chip_good(map, chip, adr, &datum)) {
   1705			if (cfi_check_err_status(map, chip, adr))
   1706				ret = -EIO;
   1707			break;
   1708		}
   1709
   1710		/* Latency issues. Drop the lock, wait a while and retry */
   1711		UDELAY(map, chip, adr, 1);
   1712	}
   1713
   1714	return ret;
   1715}
   1716
   1717static int __xipram do_write_oneword_start(struct map_info *map,
   1718					   struct flchip *chip,
   1719					   unsigned long adr, int mode)
   1720{
   1721	int ret;
   1722
   1723	mutex_lock(&chip->mutex);
   1724
   1725	ret = get_chip(map, chip, adr, mode);
   1726	if (ret) {
   1727		mutex_unlock(&chip->mutex);
   1728		return ret;
   1729	}
   1730
   1731	if (mode == FL_OTP_WRITE)
   1732		otp_enter(map, chip, adr, map_bankwidth(map));
   1733
   1734	return ret;
   1735}
   1736
   1737static void __xipram do_write_oneword_done(struct map_info *map,
   1738					   struct flchip *chip,
   1739					   unsigned long adr, int mode)
   1740{
   1741	if (mode == FL_OTP_WRITE)
   1742		otp_exit(map, chip, adr, map_bankwidth(map));
   1743
   1744	chip->state = FL_READY;
   1745	DISABLE_VPP(map);
   1746	put_chip(map, chip, adr);
   1747
   1748	mutex_unlock(&chip->mutex);
   1749}
   1750
   1751static int __xipram do_write_oneword_retry(struct map_info *map,
   1752					   struct flchip *chip,
   1753					   unsigned long adr, map_word datum,
   1754					   int mode)
   1755{
   1756	struct cfi_private *cfi = map->fldrv_priv;
   1757	int ret = 0;
   1758	map_word oldd;
   1759	int retry_cnt = 0;
   1760
   1761	/*
   1762	 * Check for a NOP for the case when the datum to write is already
   1763	 * present - it saves time and works around buggy chips that corrupt
   1764	 * data at other locations when 0xff is written to a location that
   1765	 * already contains 0xff.
   1766	 */
   1767	oldd = map_read(map, adr);
   1768	if (map_word_equal(map, oldd, datum)) {
   1769		pr_debug("MTD %s(): NOP\n", __func__);
   1770		return ret;
   1771	}
   1772
   1773	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
   1774	ENABLE_VPP(map);
   1775	xip_disable(map, chip, adr);
   1776
   1777 retry:
   1778	ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
   1779	if (ret) {
   1780		/* reset on all failures. */
   1781		map_write(map, CMD(0xF0), chip->start);
   1782		/* FIXME - should have reset delay before continuing */
   1783
   1784		if (++retry_cnt <= MAX_RETRIES) {
   1785			ret = 0;
   1786			goto retry;
   1787		}
   1788	}
   1789	xip_enable(map, chip, adr);
   1790
   1791	return ret;
   1792}
   1793
   1794static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
   1795				     unsigned long adr, map_word datum,
   1796				     int mode)
   1797{
   1798	int ret;
   1799
   1800	adr += chip->start;
   1801
   1802	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr,
   1803		 datum.x[0]);
   1804
   1805	ret = do_write_oneword_start(map, chip, adr, mode);
   1806	if (ret)
   1807		return ret;
   1808
   1809	ret = do_write_oneword_retry(map, chip, adr, datum, mode);
   1810
   1811	do_write_oneword_done(map, chip, adr, mode);
   1812
   1813	return ret;
   1814}
   1815
   1816
   1817static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
   1818				  size_t *retlen, const u_char *buf)
   1819{
   1820	struct map_info *map = mtd->priv;
   1821	struct cfi_private *cfi = map->fldrv_priv;
   1822	int ret;
   1823	int chipnum;
   1824	unsigned long ofs, chipstart;
   1825	DECLARE_WAITQUEUE(wait, current);
   1826
   1827	chipnum = to >> cfi->chipshift;
   1828	ofs = to  - (chipnum << cfi->chipshift);
   1829	chipstart = cfi->chips[chipnum].start;
   1830
    1831	/* If it's not bus-aligned, merge and write the leading partial word */
   1832	if (ofs & (map_bankwidth(map)-1)) {
   1833		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
   1834		int i = ofs - bus_ofs;
   1835		int n = 0;
   1836		map_word tmp_buf;
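        			/*
        			 * Example (illustrative): with a 4-byte bus width and
        			 * ofs == 0x102, bus_ofs is 0x100 and i is 2, so at most
        			 * map_bankwidth(map) - i == 2 bytes of 'buf' are merged
        			 * into the old flash word before it is rewritten.
        			 */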
   1837
   1838 retry:
   1839		mutex_lock(&cfi->chips[chipnum].mutex);
   1840
   1841		if (cfi->chips[chipnum].state != FL_READY) {
   1842			set_current_state(TASK_UNINTERRUPTIBLE);
   1843			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
   1844
   1845			mutex_unlock(&cfi->chips[chipnum].mutex);
   1846
   1847			schedule();
   1848			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
   1849			goto retry;
   1850		}
   1851
   1852		/* Load 'tmp_buf' with old contents of flash */
   1853		tmp_buf = map_read(map, bus_ofs+chipstart);
   1854
   1855		mutex_unlock(&cfi->chips[chipnum].mutex);
   1856
   1857		/* Number of bytes to copy from buffer */
   1858		n = min_t(int, len, map_bankwidth(map)-i);
   1859
   1860		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
   1861
   1862		ret = do_write_oneword(map, &cfi->chips[chipnum],
   1863				       bus_ofs, tmp_buf, FL_WRITING);
   1864		if (ret)
   1865			return ret;
   1866
   1867		ofs += n;
   1868		buf += n;
   1869		(*retlen) += n;
   1870		len -= n;
   1871
   1872		if (ofs >> cfi->chipshift) {
   1873			chipnum ++;
   1874			ofs = 0;
   1875			if (chipnum == cfi->numchips)
   1876				return 0;
   1877		}
   1878	}
   1879
   1880	/* We are now aligned, write as much as possible */
   1881	while(len >= map_bankwidth(map)) {
   1882		map_word datum;
   1883
   1884		datum = map_word_load(map, buf);
   1885
   1886		ret = do_write_oneword(map, &cfi->chips[chipnum],
   1887				       ofs, datum, FL_WRITING);
   1888		if (ret)
   1889			return ret;
   1890
   1891		ofs += map_bankwidth(map);
   1892		buf += map_bankwidth(map);
   1893		(*retlen) += map_bankwidth(map);
   1894		len -= map_bankwidth(map);
   1895
   1896		if (ofs >> cfi->chipshift) {
   1897			chipnum ++;
   1898			ofs = 0;
   1899			if (chipnum == cfi->numchips)
   1900				return 0;
   1901			chipstart = cfi->chips[chipnum].start;
   1902		}
   1903	}
   1904
   1905	/* Write the trailing bytes if any */
   1906	if (len & (map_bankwidth(map)-1)) {
   1907		map_word tmp_buf;
   1908
   1909 retry1:
   1910		mutex_lock(&cfi->chips[chipnum].mutex);
   1911
   1912		if (cfi->chips[chipnum].state != FL_READY) {
   1913			set_current_state(TASK_UNINTERRUPTIBLE);
   1914			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
   1915
   1916			mutex_unlock(&cfi->chips[chipnum].mutex);
   1917
   1918			schedule();
   1919			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
   1920			goto retry1;
   1921		}
   1922
   1923		tmp_buf = map_read(map, ofs + chipstart);
   1924
   1925		mutex_unlock(&cfi->chips[chipnum].mutex);
   1926
   1927		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
   1928
   1929		ret = do_write_oneword(map, &cfi->chips[chipnum],
   1930				       ofs, tmp_buf, FL_WRITING);
   1931		if (ret)
   1932			return ret;
   1933
   1934		(*retlen) += len;
   1935	}
   1936
   1937	return 0;
   1938}
   1939
   1940#if !FORCE_WORD_WRITE
   1941static int __xipram do_write_buffer_wait(struct map_info *map,
   1942					 struct flchip *chip, unsigned long adr,
   1943					 map_word datum)
   1944{
   1945	unsigned long timeo;
   1946	unsigned long u_write_timeout;
   1947	int ret = 0;
   1948
   1949	/*
   1950	 * Timeout is calculated according to CFI data, if available.
   1951	 * See more comments in cfi_cmdset_0002().
   1952	 */
   1953	u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max);
   1954	timeo = jiffies + u_write_timeout;
   1955
   1956	for (;;) {
   1957		if (chip->state != FL_WRITING) {
   1958			/* Someone's suspended the write. Sleep */
   1959			DECLARE_WAITQUEUE(wait, current);
   1960
   1961			set_current_state(TASK_UNINTERRUPTIBLE);
   1962			add_wait_queue(&chip->wq, &wait);
   1963			mutex_unlock(&chip->mutex);
   1964			schedule();
   1965			remove_wait_queue(&chip->wq, &wait);
   1966			timeo = jiffies + (HZ / 2); /* FIXME */
   1967			mutex_lock(&chip->mutex);
   1968			continue;
   1969		}
   1970
    1971		/*
    1972		 * Test the timeout together with a final chip_good() check, so a
    1973		 * write that completed while we slept is not reported as a timeout.
    1974		 */
   1975		if (time_after(jiffies, timeo) &&
   1976		    !chip_good(map, chip, adr, &datum)) {
   1977			pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
   1978			       __func__, adr);
   1979			ret = -EIO;
   1980			break;
   1981		}
   1982
   1983		if (chip_good(map, chip, adr, &datum)) {
   1984			if (cfi_check_err_status(map, chip, adr))
   1985				ret = -EIO;
   1986			break;
   1987		}
   1988
   1989		/* Latency issues. Drop the lock, wait a while and retry */
   1990		UDELAY(map, chip, adr, 1);
   1991	}
   1992
   1993	return ret;
   1994}
   1995
   1996static void __xipram do_write_buffer_reset(struct map_info *map,
   1997					   struct flchip *chip,
   1998					   struct cfi_private *cfi)
   1999{
   2000	/*
   2001	 * Recovery from write-buffer programming failures requires
   2002	 * the write-to-buffer-reset sequence.  Since the last part
   2003	 * of the sequence also works as a normal reset, we can run
   2004	 * the same commands regardless of why we are here.
   2005	 * See e.g.
   2006	 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
   2007	 */
   2008	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
   2009			 cfi->device_type, NULL);
   2010	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
   2011			 cfi->device_type, NULL);
   2012	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
   2013			 cfi->device_type, NULL);
   2014
   2015	/* FIXME - should have reset delay before continuing */
   2016}
   2017
   2018/*
   2019 * FIXME: interleaved mode not tested, and probably not supported!
   2020 */
   2021static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
   2022				    unsigned long adr, const u_char *buf,
   2023				    int len)
   2024{
   2025	struct cfi_private *cfi = map->fldrv_priv;
   2026	int ret;
   2027	unsigned long cmd_adr;
   2028	int z, words;
   2029	map_word datum;
   2030
   2031	adr += chip->start;
   2032	cmd_adr = adr;
   2033
   2034	mutex_lock(&chip->mutex);
   2035	ret = get_chip(map, chip, adr, FL_WRITING);
   2036	if (ret) {
   2037		mutex_unlock(&chip->mutex);
   2038		return ret;
   2039	}
   2040
   2041	datum = map_word_load(map, buf);
   2042
   2043	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
   2044		 __func__, adr, datum.x[0]);
   2045
   2046	XIP_INVAL_CACHED_RANGE(map, adr, len);
   2047	ENABLE_VPP(map);
   2048	xip_disable(map, chip, cmd_adr);
   2049
   2050	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
   2051	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
   2052
   2053	/* Write Buffer Load */
   2054	map_write(map, CMD(0x25), cmd_adr);
   2055
   2056	chip->state = FL_WRITING_TO_BUFFER;
   2057
   2058	/* Write length of data to come */
   2059	words = len / map_bankwidth(map);
   2060	map_write(map, CMD(words - 1), cmd_adr);
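        		/*
        		 * The count is encoded as N - 1: e.g. a 32-byte transfer on a
        		 * 2-byte-wide map is 16 words, so the value 15 is written here.
        		 */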
   2061	/* Write data */
   2062	z = 0;
   2063	while(z < words * map_bankwidth(map)) {
   2064		datum = map_word_load(map, buf);
   2065		map_write(map, datum, adr + z);
   2066
   2067		z += map_bankwidth(map);
   2068		buf += map_bankwidth(map);
   2069	}
   2070	z -= map_bankwidth(map);
   2071
   2072	adr += z;
   2073
   2074	/* Write Buffer Program Confirm: GO GO GO */
   2075	map_write(map, CMD(0x29), cmd_adr);
   2076	chip->state = FL_WRITING;
   2077
   2078	INVALIDATE_CACHE_UDELAY(map, chip,
   2079				adr, map_bankwidth(map),
   2080				chip->word_write_time);
   2081
   2082	ret = do_write_buffer_wait(map, chip, adr, datum);
   2083	if (ret)
   2084		do_write_buffer_reset(map, chip, cfi);
   2085
   2086	xip_enable(map, chip, adr);
   2087
   2088	chip->state = FL_READY;
   2089	DISABLE_VPP(map);
   2090	put_chip(map, chip, adr);
   2091	mutex_unlock(&chip->mutex);
   2092
   2093	return ret;
   2094}
   2095
   2096
   2097static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
   2098				    size_t *retlen, const u_char *buf)
   2099{
   2100	struct map_info *map = mtd->priv;
   2101	struct cfi_private *cfi = map->fldrv_priv;
   2102	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
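        		/*
        		 * MaxBufWriteSize is log2 of the buffer size per the CFI spec:
        		 * e.g. a value of 5 on a non-interleaved map gives a 32-byte
        		 * write buffer.
        		 */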
   2103	int ret;
   2104	int chipnum;
   2105	unsigned long ofs;
   2106
   2107	chipnum = to >> cfi->chipshift;
   2108	ofs = to  - (chipnum << cfi->chipshift);
   2109
    2110	/* If it's not bus-aligned, write the leading bytes as word writes */
   2111	if (ofs & (map_bankwidth(map)-1)) {
   2112		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
   2113		if (local_len > len)
   2114			local_len = len;
   2115		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
   2116					     local_len, retlen, buf);
   2117		if (ret)
   2118			return ret;
   2119		ofs += local_len;
   2120		buf += local_len;
   2121		len -= local_len;
   2122
   2123		if (ofs >> cfi->chipshift) {
   2124			chipnum ++;
   2125			ofs = 0;
   2126			if (chipnum == cfi->numchips)
   2127				return 0;
   2128		}
   2129	}
   2130
   2131	/* Write buffer is worth it only if more than one word to write... */
   2132	while (len >= map_bankwidth(map) * 2) {
   2133		/* We must not cross write block boundaries */
   2134		int size = wbufsize - (ofs & (wbufsize-1));
   2135
   2136		if (size > len)
   2137			size = len;
   2138		if (size % map_bankwidth(map))
   2139			size -= size % map_bankwidth(map);
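        			/*
        			 * Example (illustrative): with wbufsize == 32 and
        			 * ofs == 0x1c, only 4 bytes remain before the write-block
        			 * boundary, so size is trimmed to 4 and the next chunk
        			 * starts aligned.
        			 */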
   2140
   2141		ret = do_write_buffer(map, &cfi->chips[chipnum],
   2142				      ofs, buf, size);
   2143		if (ret)
   2144			return ret;
   2145
   2146		ofs += size;
   2147		buf += size;
   2148		(*retlen) += size;
   2149		len -= size;
   2150
   2151		if (ofs >> cfi->chipshift) {
   2152			chipnum ++;
   2153			ofs = 0;
   2154			if (chipnum == cfi->numchips)
   2155				return 0;
   2156		}
   2157	}
   2158
   2159	if (len) {
   2160		size_t retlen_dregs = 0;
   2161
   2162		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
   2163					     len, &retlen_dregs, buf);
   2164
   2165		*retlen += retlen_dregs;
   2166		return ret;
   2167	}
   2168
   2169	return 0;
   2170}
   2171#endif /* !FORCE_WORD_WRITE */
   2172
   2173/*
   2174 * Wait for the flash chip to become ready to write data
   2175 *
   2176 * This is only called during the panic_write() path. When panic_write()
   2177 * is called, the kernel is in the process of a panic, and will soon be
   2178 * dead. Therefore we don't take any locks, and attempt to get access
   2179 * to the chip as soon as possible.
   2180 */
   2181static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
   2182				 unsigned long adr)
   2183{
   2184	struct cfi_private *cfi = map->fldrv_priv;
   2185	int retries = 10;
   2186	int i;
   2187
    2188	/*
    2189	 * If the driver thinks the chip is idle, and no toggle bits
    2190	 * are changing, then it really is idle.
    2191	 */
   2192	if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL))
   2193		return 0;
   2194
   2195	/*
   2196	 * Try several times to reset the chip and then wait for it
   2197	 * to become idle. The upper limit of a few milliseconds of
   2198	 * delay isn't a big problem: the kernel is dying anyway. It
   2199	 * is more important to save the messages.
   2200	 */
   2201	while (retries > 0) {
   2202		const unsigned long timeo = (HZ / 1000) + 1;
   2203
   2204		/* send the reset command */
   2205		map_write(map, CMD(0xF0), chip->start);
   2206
   2207		/* wait for the chip to become ready */
   2208		for (i = 0; i < jiffies_to_usecs(timeo); i++) {
   2209			if (chip_ready(map, chip, adr, NULL))
   2210				return 0;
   2211
   2212			udelay(1);
   2213		}
   2214
   2215		retries--;
   2216	}
   2217
   2218	/* the chip never became ready */
   2219	return -EBUSY;
   2220}
   2221
   2222/*
   2223 * Write out one word of data to a single flash chip during a kernel panic
   2224 *
   2225 * This is only called during the panic_write() path. When panic_write()
   2226 * is called, the kernel is in the process of a panic, and will soon be
   2227 * dead. Therefore we don't take any locks, and attempt to get access
   2228 * to the chip as soon as possible.
   2229 *
   2230 * The implementation of this routine is intentionally similar to
   2231 * do_write_oneword(), in order to ease code maintenance.
   2232 */
   2233static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
   2234				  unsigned long adr, map_word datum)
   2235{
   2236	const unsigned long uWriteTimeout = (HZ / 1000) + 1;
   2237	struct cfi_private *cfi = map->fldrv_priv;
   2238	int retry_cnt = 0;
   2239	map_word oldd;
   2240	int ret;
   2241	int i;
   2242
   2243	adr += chip->start;
   2244
   2245	ret = cfi_amdstd_panic_wait(map, chip, adr);
   2246	if (ret)
   2247		return ret;
   2248
   2249	pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
   2250			__func__, adr, datum.x[0]);
   2251
   2252	/*
   2253	 * Check for a NOP for the case when the datum to write is already
   2254	 * present - it saves time and works around buggy chips that corrupt
   2255	 * data at other locations when 0xff is written to a location that
   2256	 * already contains 0xff.
   2257	 */
   2258	oldd = map_read(map, adr);
   2259	if (map_word_equal(map, oldd, datum)) {
   2260		pr_debug("MTD %s(): NOP\n", __func__);
   2261		goto op_done;
   2262	}
   2263
   2264	ENABLE_VPP(map);
   2265
   2266retry:
   2267	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
   2268	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
   2269	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
   2270	map_write(map, datum, adr);
   2271
   2272	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
   2273		if (chip_ready(map, chip, adr, NULL))
   2274			break;
   2275
   2276		udelay(1);
   2277	}
   2278
   2279	if (!chip_ready(map, chip, adr, &datum) ||
   2280	    cfi_check_err_status(map, chip, adr)) {
   2281		/* reset on all failures. */
   2282		map_write(map, CMD(0xF0), chip->start);
   2283		/* FIXME - should have reset delay before continuing */
   2284
   2285		if (++retry_cnt <= MAX_RETRIES)
   2286			goto retry;
   2287
   2288		ret = -EIO;
   2289	}
   2290
   2291op_done:
   2292	DISABLE_VPP(map);
   2293	return ret;
   2294}
   2295
   2296/*
   2297 * Write out some data during a kernel panic
   2298 *
   2299 * This is used by the mtdoops driver to save the dying messages from a
   2300 * kernel which has panic'd.
   2301 *
   2302 * This routine ignores all of the locking used throughout the rest of the
   2303 * driver, in order to ensure that the data gets written out no matter what
   2304 * state this driver (and the flash chip itself) was in when the kernel crashed.
   2305 *
   2306 * The implementation of this routine is intentionally similar to
   2307 * cfi_amdstd_write_words(), in order to ease code maintenance.
   2308 */
   2309static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
   2310				  size_t *retlen, const u_char *buf)
   2311{
   2312	struct map_info *map = mtd->priv;
   2313	struct cfi_private *cfi = map->fldrv_priv;
   2314	unsigned long ofs, chipstart;
   2315	int ret;
   2316	int chipnum;
   2317
   2318	chipnum = to >> cfi->chipshift;
   2319	ofs = to - (chipnum << cfi->chipshift);
   2320	chipstart = cfi->chips[chipnum].start;
   2321
    2322	/* If it's not bus-aligned, merge and write the leading partial word */
   2323	if (ofs & (map_bankwidth(map) - 1)) {
   2324		unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
   2325		int i = ofs - bus_ofs;
   2326		int n = 0;
   2327		map_word tmp_buf;
   2328
   2329		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
   2330		if (ret)
   2331			return ret;
   2332
   2333		/* Load 'tmp_buf' with old contents of flash */
   2334		tmp_buf = map_read(map, bus_ofs + chipstart);
   2335
   2336		/* Number of bytes to copy from buffer */
   2337		n = min_t(int, len, map_bankwidth(map) - i);
   2338
   2339		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
   2340
   2341		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
   2342					     bus_ofs, tmp_buf);
   2343		if (ret)
   2344			return ret;
   2345
   2346		ofs += n;
   2347		buf += n;
   2348		(*retlen) += n;
   2349		len -= n;
   2350
   2351		if (ofs >> cfi->chipshift) {
   2352			chipnum++;
   2353			ofs = 0;
   2354			if (chipnum == cfi->numchips)
   2355				return 0;
   2356		}
   2357	}
   2358
   2359	/* We are now aligned, write as much as possible */
   2360	while (len >= map_bankwidth(map)) {
   2361		map_word datum;
   2362
   2363		datum = map_word_load(map, buf);
   2364
   2365		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
   2366					     ofs, datum);
   2367		if (ret)
   2368			return ret;
   2369
   2370		ofs += map_bankwidth(map);
   2371		buf += map_bankwidth(map);
   2372		(*retlen) += map_bankwidth(map);
   2373		len -= map_bankwidth(map);
   2374
   2375		if (ofs >> cfi->chipshift) {
   2376			chipnum++;
   2377			ofs = 0;
   2378			if (chipnum == cfi->numchips)
   2379				return 0;
   2380
   2381			chipstart = cfi->chips[chipnum].start;
   2382		}
   2383	}
   2384
   2385	/* Write the trailing bytes if any */
   2386	if (len & (map_bankwidth(map) - 1)) {
   2387		map_word tmp_buf;
   2388
   2389		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
   2390		if (ret)
   2391			return ret;
   2392
   2393		tmp_buf = map_read(map, ofs + chipstart);
   2394
   2395		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
   2396
   2397		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
   2398					     ofs, tmp_buf);
   2399		if (ret)
   2400			return ret;
   2401
   2402		(*retlen) += len;
   2403	}
   2404
   2405	return 0;
   2406}
   2407
   2408
   2409/*
   2410 * Handle devices with one erase region, that only implement
   2411 * the chip erase command.
   2412 */
   2413static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
   2414{
   2415	struct cfi_private *cfi = map->fldrv_priv;
   2416	unsigned long timeo = jiffies + HZ;
   2417	unsigned long int adr;
   2418	DECLARE_WAITQUEUE(wait, current);
   2419	int ret;
   2420	int retry_cnt = 0;
   2421	map_word datum = map_word_ff(map);
   2422
   2423	adr = cfi->addr_unlock1;
   2424
   2425	mutex_lock(&chip->mutex);
   2426	ret = get_chip(map, chip, adr, FL_ERASING);
   2427	if (ret) {
   2428		mutex_unlock(&chip->mutex);
   2429		return ret;
   2430	}
   2431
   2432	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
   2433	       __func__, chip->start);
   2434
   2435	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
   2436	ENABLE_VPP(map);
   2437	xip_disable(map, chip, adr);
   2438
   2439 retry:
   2440	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
   2441	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
   2442	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
   2443	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
   2444	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
   2445	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
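        		/*
        		 * Six-cycle JEDEC chip-erase sequence: AA/55/80 (erase setup),
        		 * then AA/55/10. Unlike sector erase, the final 0x10 cycle
        		 * goes to the unlock address rather than a sector address.
        		 */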
   2446
   2447	chip->state = FL_ERASING;
   2448	chip->erase_suspended = 0;
   2449	chip->in_progress_block_addr = adr;
   2450	chip->in_progress_block_mask = ~(map->size - 1);
   2451
   2452	INVALIDATE_CACHE_UDELAY(map, chip,
   2453				adr, map->size,
   2454				chip->erase_time*500);
   2455
   2456	timeo = jiffies + (HZ*20);
   2457
   2458	for (;;) {
   2459		if (chip->state != FL_ERASING) {
   2460			/* Someone's suspended the erase. Sleep */
   2461			set_current_state(TASK_UNINTERRUPTIBLE);
   2462			add_wait_queue(&chip->wq, &wait);
   2463			mutex_unlock(&chip->mutex);
   2464			schedule();
   2465			remove_wait_queue(&chip->wq, &wait);
   2466			mutex_lock(&chip->mutex);
   2467			continue;
   2468		}
   2469		if (chip->erase_suspended) {
   2470			/* This erase was suspended and resumed.
   2471			   Adjust the timeout */
   2472			timeo = jiffies + (HZ*20); /* FIXME */
   2473			chip->erase_suspended = 0;
   2474		}
   2475
   2476		if (chip_ready(map, chip, adr, &datum)) {
   2477			if (cfi_check_err_status(map, chip, adr))
   2478				ret = -EIO;
   2479			break;
   2480		}
   2481
   2482		if (time_after(jiffies, timeo)) {
   2483			printk(KERN_WARNING "MTD %s(): software timeout\n",
   2484			       __func__);
   2485			ret = -EIO;
   2486			break;
   2487		}
   2488
   2489		/* Latency issues. Drop the lock, wait a while and retry */
   2490		UDELAY(map, chip, adr, 1000000/HZ);
   2491	}
   2492	/* Did we succeed? */
   2493	if (ret) {
   2494		/* reset on all failures. */
   2495		map_write(map, CMD(0xF0), chip->start);
   2496		/* FIXME - should have reset delay before continuing */
   2497
   2498		if (++retry_cnt <= MAX_RETRIES) {
   2499			ret = 0;
   2500			goto retry;
   2501		}
   2502	}
   2503
   2504	chip->state = FL_READY;
   2505	xip_enable(map, chip, adr);
   2506	DISABLE_VPP(map);
   2507	put_chip(map, chip, adr);
   2508	mutex_unlock(&chip->mutex);
   2509
   2510	return ret;
   2511}
   2512
   2513
   2514static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
   2515{
   2516	struct cfi_private *cfi = map->fldrv_priv;
   2517	unsigned long timeo = jiffies + HZ;
   2518	DECLARE_WAITQUEUE(wait, current);
   2519	int ret;
   2520	int retry_cnt = 0;
   2521	map_word datum = map_word_ff(map);
   2522
   2523	adr += chip->start;
   2524
   2525	mutex_lock(&chip->mutex);
   2526	ret = get_chip(map, chip, adr, FL_ERASING);
   2527	if (ret) {
   2528		mutex_unlock(&chip->mutex);
   2529		return ret;
   2530	}
   2531
   2532	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
   2533		 __func__, adr);
   2534
   2535	XIP_INVAL_CACHED_RANGE(map, adr, len);
   2536	ENABLE_VPP(map);
   2537	xip_disable(map, chip, adr);
   2538
   2539 retry:
   2540	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
   2541	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
   2542	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
   2543	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
   2544	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
   2545	map_write(map, cfi->sector_erase_cmd, adr);
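        		/*
        		 * sector_erase_cmd is normally CMD(0x30); together with the
        		 * five cycles above this forms the JEDEC sector-erase
        		 * sequence, with the final cycle written to the sector's own
        		 * address.
        		 */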
   2546
   2547	chip->state = FL_ERASING;
   2548	chip->erase_suspended = 0;
   2549	chip->in_progress_block_addr = adr;
   2550	chip->in_progress_block_mask = ~(len - 1);
   2551
   2552	INVALIDATE_CACHE_UDELAY(map, chip,
   2553				adr, len,
   2554				chip->erase_time*500);
   2555
   2556	timeo = jiffies + (HZ*20);
   2557
   2558	for (;;) {
   2559		if (chip->state != FL_ERASING) {
   2560			/* Someone's suspended the erase. Sleep */
   2561			set_current_state(TASK_UNINTERRUPTIBLE);
   2562			add_wait_queue(&chip->wq, &wait);
   2563			mutex_unlock(&chip->mutex);
   2564			schedule();
   2565			remove_wait_queue(&chip->wq, &wait);
   2566			mutex_lock(&chip->mutex);
   2567			continue;
   2568		}
   2569		if (chip->erase_suspended) {
   2570			/* This erase was suspended and resumed.
   2571			   Adjust the timeout */
   2572			timeo = jiffies + (HZ*20); /* FIXME */
   2573			chip->erase_suspended = 0;
   2574		}
   2575
   2576		if (chip_ready(map, chip, adr, &datum)) {
   2577			if (cfi_check_err_status(map, chip, adr))
   2578				ret = -EIO;
   2579			break;
   2580		}
   2581
   2582		if (time_after(jiffies, timeo)) {
   2583			printk(KERN_WARNING "MTD %s(): software timeout\n",
   2584			       __func__);
   2585			ret = -EIO;
   2586			break;
   2587		}
   2588
   2589		/* Latency issues. Drop the lock, wait a while and retry */
   2590		UDELAY(map, chip, adr, 1000000/HZ);
   2591	}
   2592	/* Did we succeed? */
   2593	if (ret) {
   2594		/* reset on all failures. */
   2595		map_write(map, CMD(0xF0), chip->start);
   2596		/* FIXME - should have reset delay before continuing */
   2597
   2598		if (++retry_cnt <= MAX_RETRIES) {
   2599			ret = 0;
   2600			goto retry;
   2601		}
   2602	}
   2603
   2604	chip->state = FL_READY;
   2605	xip_enable(map, chip, adr);
   2606	DISABLE_VPP(map);
   2607	put_chip(map, chip, adr);
   2608	mutex_unlock(&chip->mutex);
   2609	return ret;
   2610}
   2611
   2612
   2613static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
   2614{
   2615	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
   2616				instr->len, NULL);
   2617}
   2618
   2619
   2620static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
   2621{
   2622	struct map_info *map = mtd->priv;
   2623	struct cfi_private *cfi = map->fldrv_priv;
   2624
   2625	if (instr->addr != 0)
   2626		return -EINVAL;
   2627
   2628	if (instr->len != mtd->size)
   2629		return -EINVAL;
   2630
   2631	return do_erase_chip(map, &cfi->chips[0]);
   2632}
   2633
   2634static int do_atmel_lock(struct map_info *map, struct flchip *chip,
   2635			 unsigned long adr, int len, void *thunk)
   2636{
   2637	struct cfi_private *cfi = map->fldrv_priv;
   2638	int ret;
   2639
   2640	mutex_lock(&chip->mutex);
   2641	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
   2642	if (ret)
   2643		goto out_unlock;
   2644	chip->state = FL_LOCKING;
   2645
   2646	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
   2647
   2648	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
   2649			 cfi->device_type, NULL);
   2650	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
   2651			 cfi->device_type, NULL);
   2652	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
   2653			 cfi->device_type, NULL);
   2654	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
   2655			 cfi->device_type, NULL);
   2656	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
   2657			 cfi->device_type, NULL);
   2658	map_write(map, CMD(0x40), chip->start + adr);
   2659
   2660	chip->state = FL_READY;
   2661	put_chip(map, chip, adr + chip->start);
   2662	ret = 0;
   2663
   2664out_unlock:
   2665	mutex_unlock(&chip->mutex);
   2666	return ret;
   2667}
   2668
   2669static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
   2670			   unsigned long adr, int len, void *thunk)
   2671{
   2672	struct cfi_private *cfi = map->fldrv_priv;
   2673	int ret;
   2674
   2675	mutex_lock(&chip->mutex);
   2676	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
   2677	if (ret)
   2678		goto out_unlock;
   2679	chip->state = FL_UNLOCKING;
   2680
    2681	pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);
   2682
   2683	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
   2684			 cfi->device_type, NULL);
   2685	map_write(map, CMD(0x70), adr);
   2686
   2687	chip->state = FL_READY;
   2688	put_chip(map, chip, adr + chip->start);
   2689	ret = 0;
   2690
   2691out_unlock:
   2692	mutex_unlock(&chip->mutex);
   2693	return ret;
   2694}
   2695
   2696static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
   2697{
   2698	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
   2699}
   2700
   2701static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
   2702{
   2703	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
   2704}
   2705
   2706/*
   2707 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
   2708 */
   2709
   2710struct ppb_lock {
   2711	struct flchip *chip;
   2712	unsigned long adr;
   2713	int locked;
   2714};
   2715
   2716#define DO_XXLOCK_ONEBLOCK_LOCK		((void *)1)
   2717#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *)2)
   2718#define DO_XXLOCK_ONEBLOCK_GETLOCK	((void *)3)
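        	/*
        	 * The thunk argument selects the operation when do_ppb_xxlock()
        	 * is run over a range by cfi_varsize_frob(); e.g. cfi_ppb_lock()
        	 * below passes DO_XXLOCK_ONEBLOCK_LOCK for every sector in the
        	 * requested range.
        	 */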
   2719
   2720static int __maybe_unused do_ppb_xxlock(struct map_info *map,
   2721					struct flchip *chip,
   2722					unsigned long adr, int len, void *thunk)
   2723{
   2724	struct cfi_private *cfi = map->fldrv_priv;
   2725	unsigned long timeo;
   2726	int ret;
   2727
   2728	adr += chip->start;
   2729	mutex_lock(&chip->mutex);
   2730	ret = get_chip(map, chip, adr, FL_LOCKING);
   2731	if (ret) {
   2732		mutex_unlock(&chip->mutex);
   2733		return ret;
   2734	}
   2735
   2736	pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
   2737
   2738	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
   2739			 cfi->device_type, NULL);
   2740	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
   2741			 cfi->device_type, NULL);
   2742	/* PPB entry command */
   2743	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
   2744			 cfi->device_type, NULL);
   2745
   2746	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
   2747		chip->state = FL_LOCKING;
   2748		map_write(map, CMD(0xA0), adr);
   2749		map_write(map, CMD(0x00), adr);
   2750	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
   2751		/*
   2752		 * Unlocking of one specific sector is not supported, so we
   2753		 * have to unlock all sectors of this device instead
   2754		 */
   2755		chip->state = FL_UNLOCKING;
   2756		map_write(map, CMD(0x80), chip->start);
   2757		map_write(map, CMD(0x30), chip->start);
   2758	} else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
   2759		chip->state = FL_JEDEC_QUERY;
    2760		/* Query reads 0 for a locked sector, 1 for unlocked; invert
    2761		   so that a non-zero return means "locked" */
   2761		ret = !cfi_read_query(map, adr);
   2762	} else
   2763		BUG();
   2764
   2765	/*
   2766	 * Wait for some time as unlocking of all sectors takes quite long
   2767	 */
   2768	timeo = jiffies + msecs_to_jiffies(2000);	/* 2s max (un)locking */
   2769	for (;;) {
   2770		if (chip_ready(map, chip, adr, NULL))
   2771			break;
   2772
   2773		if (time_after(jiffies, timeo)) {
   2774			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
   2775			ret = -EIO;
   2776			break;
   2777		}
   2778
   2779		UDELAY(map, chip, adr, 1);
   2780	}
   2781
   2782	/* Exit BC commands */
   2783	map_write(map, CMD(0x90), chip->start);
   2784	map_write(map, CMD(0x00), chip->start);
   2785
   2786	chip->state = FL_READY;
   2787	put_chip(map, chip, adr);
   2788	mutex_unlock(&chip->mutex);
   2789
   2790	return ret;
   2791}
   2792
   2793static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
   2794				       uint64_t len)
   2795{
   2796	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
   2797				DO_XXLOCK_ONEBLOCK_LOCK);
   2798}
   2799
   2800static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
   2801					 uint64_t len)
   2802{
   2803	struct mtd_erase_region_info *regions = mtd->eraseregions;
   2804	struct map_info *map = mtd->priv;
   2805	struct cfi_private *cfi = map->fldrv_priv;
   2806	struct ppb_lock *sect;
   2807	unsigned long adr;
   2808	loff_t offset;
   2809	uint64_t length;
   2810	int chipnum;
   2811	int i;
   2812	int sectors;
   2813	int ret;
   2814	int max_sectors;
   2815
   2816	/*
   2817	 * PPB unlocking always unlocks all sectors of the flash chip.
    2818	 * We need to re-lock all previously locked sectors. So let's
   2819	 * first check the locking status of all sectors and save
   2820	 * it for future use.
   2821	 */
   2822	max_sectors = 0;
   2823	for (i = 0; i < mtd->numeraseregions; i++)
   2824		max_sectors += regions[i].numblocks;
   2825
   2826	sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
   2827	if (!sect)
   2828		return -ENOMEM;
   2829
   2830	/*
   2831	 * This code to walk all sectors is a slightly modified version
   2832	 * of the cfi_varsize_frob() code.
   2833	 */
   2834	i = 0;
   2835	chipnum = 0;
   2836	adr = 0;
   2837	sectors = 0;
   2838	offset = 0;
   2839	length = mtd->size;
   2840
   2841	while (length) {
   2842		int size = regions[i].erasesize;
   2843
   2844		/*
   2845		 * Only test sectors that shall not be unlocked. The other
    2846		 * sectors shall be unlocked, so let's keep their locking
   2847		 * status at "unlocked" (locked=0) for the final re-locking.
   2848		 */
   2849		if ((offset < ofs) || (offset >= (ofs + len))) {
   2850			sect[sectors].chip = &cfi->chips[chipnum];
   2851			sect[sectors].adr = adr;
   2852			sect[sectors].locked = do_ppb_xxlock(
   2853				map, &cfi->chips[chipnum], adr, 0,
   2854				DO_XXLOCK_ONEBLOCK_GETLOCK);
   2855		}
   2856
   2857		adr += size;
   2858		offset += size;
   2859		length -= size;
   2860
   2861		if (offset == regions[i].offset + size * regions[i].numblocks)
   2862			i++;
   2863
   2864		if (adr >> cfi->chipshift) {
   2865			if (offset >= (ofs + len))
   2866				break;
   2867			adr = 0;
   2868			chipnum++;
   2869
   2870			if (chipnum >= cfi->numchips)
   2871				break;
   2872		}
   2873
   2874		sectors++;
   2875		if (sectors >= max_sectors) {
    2876			printk(KERN_ERR "Only %d sectors are supported for PPB locking!\n",
    2877			       max_sectors);
   2878			kfree(sect);
   2879			return -EINVAL;
   2880		}
   2881	}
   2882
   2883	/* Now unlock the whole chip */
   2884	ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
   2885			       DO_XXLOCK_ONEBLOCK_UNLOCK);
   2886	if (ret) {
   2887		kfree(sect);
   2888		return ret;
   2889	}
   2890
   2891	/*
   2892	 * PPB unlocking always unlocks all sectors of the flash chip.
   2893	 * We need to re-lock all previously locked sectors.
   2894	 */
   2895	for (i = 0; i < sectors; i++) {
   2896		if (sect[i].locked)
   2897			do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
   2898				      DO_XXLOCK_ONEBLOCK_LOCK);
   2899	}
   2900
   2901	kfree(sect);
   2902	return ret;
   2903}
   2904
   2905static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
   2906					    uint64_t len)
   2907{
   2908	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
   2909				DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
   2910}
   2911
   2912static void cfi_amdstd_sync (struct mtd_info *mtd)
   2913{
   2914	struct map_info *map = mtd->priv;
   2915	struct cfi_private *cfi = map->fldrv_priv;
   2916	int i;
   2917	struct flchip *chip;
   2918	int ret = 0;
   2919	DECLARE_WAITQUEUE(wait, current);
   2920
   2921	for (i=0; !ret && i<cfi->numchips; i++) {
   2922		chip = &cfi->chips[i];
   2923
   2924	retry:
   2925		mutex_lock(&chip->mutex);
   2926
   2927		switch(chip->state) {
   2928		case FL_READY:
   2929		case FL_STATUS:
   2930		case FL_CFI_QUERY:
   2931		case FL_JEDEC_QUERY:
   2932			chip->oldstate = chip->state;
   2933			chip->state = FL_SYNCING;
   2934			/* No need to wake_up() on this state change -
   2935			 * as the whole point is that nobody can do anything
   2936			 * with the chip now anyway.
   2937			 */
   2938			fallthrough;
   2939		case FL_SYNCING:
   2940			mutex_unlock(&chip->mutex);
   2941			break;
   2942
   2943		default:
   2944			/* Not an idle state */
   2945			set_current_state(TASK_UNINTERRUPTIBLE);
   2946			add_wait_queue(&chip->wq, &wait);
   2947
   2948			mutex_unlock(&chip->mutex);
   2949
   2950			schedule();
   2951
   2952			remove_wait_queue(&chip->wq, &wait);
   2953
   2954			goto retry;
   2955		}
   2956	}
   2957
   2958	/* Unlock the chips again */
   2959
   2960	for (i--; i >=0; i--) {
   2961		chip = &cfi->chips[i];
   2962
   2963		mutex_lock(&chip->mutex);
   2964
   2965		if (chip->state == FL_SYNCING) {
   2966			chip->state = chip->oldstate;
   2967			wake_up(&chip->wq);
   2968		}
   2969		mutex_unlock(&chip->mutex);
   2970	}
   2971}
   2972
   2973
   2974static int cfi_amdstd_suspend(struct mtd_info *mtd)
   2975{
   2976	struct map_info *map = mtd->priv;
   2977	struct cfi_private *cfi = map->fldrv_priv;
   2978	int i;
   2979	struct flchip *chip;
   2980	int ret = 0;
   2981
   2982	for (i=0; !ret && i<cfi->numchips; i++) {
   2983		chip = &cfi->chips[i];
   2984
   2985		mutex_lock(&chip->mutex);
   2986
   2987		switch(chip->state) {
   2988		case FL_READY:
   2989		case FL_STATUS:
   2990		case FL_CFI_QUERY:
   2991		case FL_JEDEC_QUERY:
   2992			chip->oldstate = chip->state;
   2993			chip->state = FL_PM_SUSPENDED;
   2994			/* No need to wake_up() on this state change -
   2995			 * as the whole point is that nobody can do anything
   2996			 * with the chip now anyway.
   2997			 */
   2998			break;
   2999		case FL_PM_SUSPENDED:
   3000			break;
   3001
   3002		default:
   3003			ret = -EAGAIN;
   3004			break;
   3005		}
   3006		mutex_unlock(&chip->mutex);
   3007	}
   3008
   3009	/* Unlock the chips again */
   3010
   3011	if (ret) {
   3012		for (i--; i >=0; i--) {
   3013			chip = &cfi->chips[i];
   3014
   3015			mutex_lock(&chip->mutex);
   3016
   3017			if (chip->state == FL_PM_SUSPENDED) {
   3018				chip->state = chip->oldstate;
   3019				wake_up(&chip->wq);
   3020			}
   3021			mutex_unlock(&chip->mutex);
   3022		}
   3023	}
   3024
   3025	return ret;
   3026}
   3027
   3028
   3029static void cfi_amdstd_resume(struct mtd_info *mtd)
   3030{
   3031	struct map_info *map = mtd->priv;
   3032	struct cfi_private *cfi = map->fldrv_priv;
   3033	int i;
   3034	struct flchip *chip;
   3035
   3036	for (i=0; i<cfi->numchips; i++) {
   3037
   3038		chip = &cfi->chips[i];
   3039
   3040		mutex_lock(&chip->mutex);
   3041
   3042		if (chip->state == FL_PM_SUSPENDED) {
   3043			chip->state = FL_READY;
   3044			map_write(map, CMD(0xF0), chip->start);
   3045			wake_up(&chip->wq);
   3046		}
   3047		else
   3048			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
   3049
   3050		mutex_unlock(&chip->mutex);
   3051	}
   3052}
   3053
   3054
   3055/*
   3056 * Ensure that the flash device is put back into read array mode before
   3057 * unloading the driver or rebooting.  On some systems, rebooting while
   3058 * the flash is in query/program/erase mode will prevent the CPU from
   3059 * fetching the bootloader code, requiring a hard reset or power cycle.
   3060 */
   3061static int cfi_amdstd_reset(struct mtd_info *mtd)
   3062{
   3063	struct map_info *map = mtd->priv;
   3064	struct cfi_private *cfi = map->fldrv_priv;
   3065	int i, ret;
   3066	struct flchip *chip;
   3067
   3068	for (i = 0; i < cfi->numchips; i++) {
   3069
   3070		chip = &cfi->chips[i];
   3071
   3072		mutex_lock(&chip->mutex);
   3073
   3074		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
   3075		if (!ret) {
   3076			map_write(map, CMD(0xF0), chip->start);
   3077			chip->state = FL_SHUTDOWN;
   3078			put_chip(map, chip, chip->start);
   3079		}
   3080
   3081		mutex_unlock(&chip->mutex);
   3082	}
   3083
   3084	return 0;
   3085}
   3086
   3087
   3088static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
   3089			       void *v)
   3090{
   3091	struct mtd_info *mtd;
   3092
   3093	mtd = container_of(nb, struct mtd_info, reboot_notifier);
   3094	cfi_amdstd_reset(mtd);
   3095	return NOTIFY_DONE;
   3096}
   3097
   3098
   3099static void cfi_amdstd_destroy(struct mtd_info *mtd)
   3100{
   3101	struct map_info *map = mtd->priv;
   3102	struct cfi_private *cfi = map->fldrv_priv;
   3103
   3104	cfi_amdstd_reset(mtd);
   3105	unregister_reboot_notifier(&mtd->reboot_notifier);
   3106	kfree(cfi->cmdset_priv);
   3107	kfree(cfi->cfiq);
   3108	kfree(cfi);
   3109	kfree(mtd->eraseregions);
   3110}
   3111
   3112MODULE_LICENSE("GPL");
   3113MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
   3114MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
   3115MODULE_ALIAS("cfi_cmdset_0006");
   3116MODULE_ALIAS("cfi_cmdset_0701");