cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

brcmnand.c (89286B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2010-2015 Broadcom Corporation
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/platform_data/brcmnand.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/static_key.h>
#include <linux/list.h>
#include <linux/log2.h>

#include "brcmnand.h"

/*
 * This flag controls whether WP stays on between erase/write commands to
 * mitigate flash corruption due to power glitches. Values:
 * 0: NAND_WP is not used or not available
 * 1: NAND_WP is set by default, cleared for erase/write operations
 * 2: NAND_WP is always cleared
 */
static int wp_on = 1;
module_param(wp_on, int, 0444);
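
/*
 * Usage sketch (illustrative, not part of the original source): wp_on is a
 * read-only (0444) module parameter, so it can only be chosen at load time,
 * e.g. "brcmnand.wp_on=2" on the kernel command line when built in, or
 * "modprobe brcmnand wp_on=2" when built as a module.
 */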

/***********************************************************************
 * Definitions
 ***********************************************************************/

#define DRV_NAME			"brcmnand"

#define CMD_NULL			0x00
#define CMD_PAGE_READ			0x01
#define CMD_SPARE_AREA_READ		0x02
#define CMD_STATUS_READ			0x03
#define CMD_PROGRAM_PAGE		0x04
#define CMD_PROGRAM_SPARE_AREA		0x05
#define CMD_COPY_BACK			0x06
#define CMD_DEVICE_ID_READ		0x07
#define CMD_BLOCK_ERASE			0x08
#define CMD_FLASH_RESET			0x09
#define CMD_BLOCKS_LOCK			0x0a
#define CMD_BLOCKS_LOCK_DOWN		0x0b
#define CMD_BLOCKS_UNLOCK		0x0c
#define CMD_READ_BLOCKS_LOCK_STATUS	0x0d
#define CMD_PARAMETER_READ		0x0e
#define CMD_PARAMETER_CHANGE_COL	0x0f
#define CMD_LOW_LEVEL_OP		0x10

struct brcm_nand_dma_desc {
	u32 next_desc;
	u32 next_desc_ext;
	u32 cmd_irq;
	u32 dram_addr;
	u32 dram_addr_ext;
	u32 tfr_len;
	u32 total_len;
	u32 flash_addr;
	u32 flash_addr_ext;
	u32 cs;
	u32 pad2[5];
	u32 status_valid;
} __packed;

/* Bitfields for brcm_nand_dma_desc::status_valid */
#define FLASH_DMA_ECC_ERROR	(1 << 8)
#define FLASH_DMA_CORR_ERROR	(1 << 9)
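
/*
 * Minimal sketch (assumption, based only on the field layout above) of
 * filling one descriptor for a single transfer; the hypothetical
 * example_fill_desc() below is illustrative and is not used anywhere:
 */
#if 0
static void example_fill_desc(struct brcm_nand_dma_desc *desc,
			      dma_addr_t buf, u64 flash_addr, u32 len,
			      u32 cs)
{
	memset(desc, 0, sizeof(*desc));	/* single descriptor: no next_desc */
	desc->dram_addr = lower_32_bits(buf);
	desc->dram_addr_ext = upper_32_bits(buf);
	desc->flash_addr = lower_32_bits(flash_addr);
	desc->flash_addr_ext = upper_32_bits(flash_addr);
	desc->tfr_len = len;
	desc->total_len = len;
	desc->cs = cs;
	/* cmd_irq encoding (command, end-of-chain, IRQ enable) is assumed */
}
#endif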

/* Bitfields for DMA_MODE */
#define FLASH_DMA_MODE_STOP_ON_ERROR	BIT(1) /* stop on uncorrectable ECC error */
#define FLASH_DMA_MODE_MODE		BIT(0) /* linked list */
#define FLASH_DMA_MODE_MASK		(FLASH_DMA_MODE_STOP_ON_ERROR |	\
						FLASH_DMA_MODE_MODE)

/* 512B flash cache in the NAND controller HW */
#define FC_SHIFT		9U
#define FC_BYTES		512U
#define FC_WORDS		(FC_BYTES >> 2)
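/* i.e. the flash cache holds FC_WORDS == 128 32-bit words (512 bytes) */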

#define BRCMNAND_MIN_PAGESIZE	512
#define BRCMNAND_MIN_BLOCKSIZE	(8 * 1024)
#define BRCMNAND_MIN_DEVSIZE	(4ULL * 1024 * 1024)

#define NAND_CTRL_RDY			(INTFC_CTLR_READY | INTFC_FLASH_READY)
#define NAND_POLL_STATUS_TIMEOUT_MS	100

#define EDU_CMD_WRITE          0x00
#define EDU_CMD_READ           0x01
#define EDU_STATUS_ACTIVE      BIT(0)
#define EDU_ERR_STATUS_ERRACK  BIT(0)
#define EDU_DONE_MASK		GENMASK(1, 0)

#define EDU_CONFIG_MODE_NAND   BIT(0)
#define EDU_CONFIG_SWAP_BYTE   BIT(1)
#ifdef CONFIG_CPU_BIG_ENDIAN
#define EDU_CONFIG_SWAP_CFG     EDU_CONFIG_SWAP_BYTE
#else
#define EDU_CONFIG_SWAP_CFG     0
#endif

/* edu registers */
enum edu_reg {
	EDU_CONFIG = 0,
	EDU_DRAM_ADDR,
	EDU_EXT_ADDR,
	EDU_LENGTH,
	EDU_CMD,
	EDU_STOP,
	EDU_STATUS,
	EDU_DONE,
	EDU_ERR_STATUS,
};

static const u16 edu_regs[] = {
	[EDU_CONFIG] = 0x00,
	[EDU_DRAM_ADDR] = 0x04,
	[EDU_EXT_ADDR] = 0x08,
	[EDU_LENGTH] = 0x0c,
	[EDU_CMD] = 0x10,
	[EDU_STOP] = 0x14,
	[EDU_STATUS] = 0x18,
	[EDU_DONE] = 0x1c,
	[EDU_ERR_STATUS] = 0x20,
};

/* flash_dma registers */
enum flash_dma_reg {
	FLASH_DMA_REVISION = 0,
	FLASH_DMA_FIRST_DESC,
	FLASH_DMA_FIRST_DESC_EXT,
	FLASH_DMA_CTRL,
	FLASH_DMA_MODE,
	FLASH_DMA_STATUS,
	FLASH_DMA_INTERRUPT_DESC,
	FLASH_DMA_INTERRUPT_DESC_EXT,
	FLASH_DMA_ERROR_STATUS,
	FLASH_DMA_CURRENT_DESC,
	FLASH_DMA_CURRENT_DESC_EXT,
};

/* flash_dma registers v0 */
static const u16 flash_dma_regs_v0[] = {
	[FLASH_DMA_REVISION]		= 0x00,
	[FLASH_DMA_FIRST_DESC]		= 0x04,
	[FLASH_DMA_CTRL]		= 0x08,
	[FLASH_DMA_MODE]		= 0x0c,
	[FLASH_DMA_STATUS]		= 0x10,
	[FLASH_DMA_INTERRUPT_DESC]	= 0x14,
	[FLASH_DMA_ERROR_STATUS]	= 0x18,
	[FLASH_DMA_CURRENT_DESC]	= 0x1c,
};

/* flash_dma registers v1 */
static const u16 flash_dma_regs_v1[] = {
	[FLASH_DMA_REVISION]		= 0x00,
	[FLASH_DMA_FIRST_DESC]		= 0x04,
	[FLASH_DMA_FIRST_DESC_EXT]	= 0x08,
	[FLASH_DMA_CTRL]		= 0x0c,
	[FLASH_DMA_MODE]		= 0x10,
	[FLASH_DMA_STATUS]		= 0x14,
	[FLASH_DMA_INTERRUPT_DESC]	= 0x18,
	[FLASH_DMA_INTERRUPT_DESC_EXT]	= 0x1c,
	[FLASH_DMA_ERROR_STATUS]	= 0x20,
	[FLASH_DMA_CURRENT_DESC]	= 0x24,
	[FLASH_DMA_CURRENT_DESC_EXT]	= 0x28,
};

/* flash_dma registers v4 */
static const u16 flash_dma_regs_v4[] = {
	[FLASH_DMA_REVISION]		= 0x00,
	[FLASH_DMA_FIRST_DESC]		= 0x08,
	[FLASH_DMA_FIRST_DESC_EXT]	= 0x0c,
	[FLASH_DMA_CTRL]		= 0x10,
	[FLASH_DMA_MODE]		= 0x14,
	[FLASH_DMA_STATUS]		= 0x18,
	[FLASH_DMA_INTERRUPT_DESC]	= 0x20,
	[FLASH_DMA_INTERRUPT_DESC_EXT]	= 0x24,
	[FLASH_DMA_ERROR_STATUS]	= 0x28,
	[FLASH_DMA_CURRENT_DESC]	= 0x30,
	[FLASH_DMA_CURRENT_DESC_EXT]	= 0x34,
};

/* Controller feature flags */
enum {
	BRCMNAND_HAS_1K_SECTORS			= BIT(0),
	BRCMNAND_HAS_PREFETCH			= BIT(1),
	BRCMNAND_HAS_CACHE_MODE			= BIT(2),
	BRCMNAND_HAS_WP				= BIT(3),
};

struct brcmnand_host;

static DEFINE_STATIC_KEY_FALSE(brcmnand_soc_has_ops_key);

struct brcmnand_controller {
	struct device		*dev;
	struct nand_controller	controller;
	void __iomem		*nand_base;
	void __iomem		*nand_fc; /* flash cache */
	void __iomem		*flash_dma_base;
	int			irq;
	unsigned int		dma_irq;
	int			nand_version;

	/* Some SoCs provide custom interrupt status register(s) */
	struct brcmnand_soc	*soc;

	/* Some SoCs have a gateable clock for the controller */
	struct clk		*clk;

	int			cmd_pending;
	bool			dma_pending;
	bool                    edu_pending;
	struct completion	done;
	struct completion	dma_done;
	struct completion       edu_done;

	/* List of NAND hosts (one for each chip-select) */
	struct list_head host_list;

	/* EDU info, per-transaction */
	const u16               *edu_offsets;
	void __iomem            *edu_base;
	int			edu_irq;
	int                     edu_count;
	u64                     edu_dram_addr;
	u32                     edu_ext_addr;
	u32                     edu_cmd;
	u32                     edu_config;
	int			sas; /* spare area size, per flash cache */
	int			sector_size_1k;
	u8			*oob;

	/* flash_dma reg */
	const u16		*flash_dma_offsets;
	struct brcm_nand_dma_desc *dma_desc;
	dma_addr_t		dma_pa;

	int (*dma_trans)(struct brcmnand_host *host, u64 addr, u32 *buf,
			 u8 *oob, u32 len, u8 dma_cmd);

	/* in-memory cache of the FLASH_CACHE, used only for some commands */
	u8			flash_cache[FC_BYTES];

	/* Controller revision details */
	const u16		*reg_offsets;
	unsigned int		reg_spacing; /* between CS1, CS2, ... regs */
	const u8		*cs_offsets; /* within each chip-select */
	const u8		*cs0_offsets; /* within CS0, if different */
	unsigned int		max_block_size;
	const unsigned int	*block_sizes;
	unsigned int		max_page_size;
	const unsigned int	*page_sizes;
	unsigned int		page_size_shift;
	unsigned int		max_oob;
	u32			features;

	/* for low-power standby/resume only */
	u32			nand_cs_nand_select;
	u32			nand_cs_nand_xor;
	u32			corr_stat_threshold;
	u32			flash_dma_mode;
	u32                     flash_edu_mode;
	bool			pio_poll_mode;
};

struct brcmnand_cfg {
	u64			device_size;
	unsigned int		block_size;
	unsigned int		page_size;
	unsigned int		spare_area_size;
	unsigned int		device_width;
	unsigned int		col_adr_bytes;
	unsigned int		blk_adr_bytes;
	unsigned int		ful_adr_bytes;
	unsigned int		sector_size_1k;
	unsigned int		ecc_level;
	/* use for low-power standby/resume only */
	u32			acc_control;
	u32			config;
	u32			config_ext;
	u32			timing_1;
	u32			timing_2;
};

struct brcmnand_host {
	struct list_head	node;

	struct nand_chip	chip;
	struct platform_device	*pdev;
	int			cs;

	unsigned int		last_cmd;
	unsigned int		last_byte;
	u64			last_addr;
	struct brcmnand_cfg	hwcfg;
	struct brcmnand_controller *ctrl;
};

enum brcmnand_reg {
	BRCMNAND_CMD_START = 0,
	BRCMNAND_CMD_EXT_ADDRESS,
	BRCMNAND_CMD_ADDRESS,
	BRCMNAND_INTFC_STATUS,
	BRCMNAND_CS_SELECT,
	BRCMNAND_CS_XOR,
	BRCMNAND_LL_OP,
	BRCMNAND_CS0_BASE,
	BRCMNAND_CS1_BASE,		/* CS1 regs, if non-contiguous */
	BRCMNAND_CORR_THRESHOLD,
	BRCMNAND_CORR_THRESHOLD_EXT,
	BRCMNAND_UNCORR_COUNT,
	BRCMNAND_CORR_COUNT,
	BRCMNAND_CORR_EXT_ADDR,
	BRCMNAND_CORR_ADDR,
	BRCMNAND_UNCORR_EXT_ADDR,
	BRCMNAND_UNCORR_ADDR,
	BRCMNAND_SEMAPHORE,
	BRCMNAND_ID,
	BRCMNAND_ID_EXT,
	BRCMNAND_LL_RDATA,
	BRCMNAND_OOB_READ_BASE,
	BRCMNAND_OOB_READ_10_BASE,	/* offset 0x10, if non-contiguous */
	BRCMNAND_OOB_WRITE_BASE,
	BRCMNAND_OOB_WRITE_10_BASE,	/* offset 0x10, if non-contiguous */
	BRCMNAND_FC_BASE,
};

/* BRCMNAND v2.1-v2.2 */
static const u16 brcmnand_regs_v21[] = {
	[BRCMNAND_CMD_START]		=  0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
	[BRCMNAND_INTFC_STATUS]		=  0x5c,
	[BRCMNAND_CS_SELECT]		=  0x14,
	[BRCMNAND_CS_XOR]		=  0x18,
	[BRCMNAND_LL_OP]		=     0,
	[BRCMNAND_CS0_BASE]		=  0x40,
	[BRCMNAND_CS1_BASE]		=     0,
	[BRCMNAND_CORR_THRESHOLD]	=     0,
	[BRCMNAND_CORR_THRESHOLD_EXT]	=     0,
	[BRCMNAND_UNCORR_COUNT]		=     0,
	[BRCMNAND_CORR_COUNT]		=     0,
	[BRCMNAND_CORR_EXT_ADDR]	=  0x60,
	[BRCMNAND_CORR_ADDR]		=  0x64,
	[BRCMNAND_UNCORR_EXT_ADDR]	=  0x68,
	[BRCMNAND_UNCORR_ADDR]		=  0x6c,
	[BRCMNAND_SEMAPHORE]		=  0x50,
	[BRCMNAND_ID]			=  0x54,
	[BRCMNAND_ID_EXT]		=     0,
	[BRCMNAND_LL_RDATA]		=     0,
	[BRCMNAND_OOB_READ_BASE]	=  0x20,
	[BRCMNAND_OOB_READ_10_BASE]	=     0,
	[BRCMNAND_OOB_WRITE_BASE]	=  0x30,
	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
	[BRCMNAND_FC_BASE]		= 0x200,
};

/* BRCMNAND v3.3-v4.0 */
static const u16 brcmnand_regs_v33[] = {
	[BRCMNAND_CMD_START]		=  0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
	[BRCMNAND_INTFC_STATUS]		=  0x6c,
	[BRCMNAND_CS_SELECT]		=  0x14,
	[BRCMNAND_CS_XOR]		=  0x18,
	[BRCMNAND_LL_OP]		= 0x178,
	[BRCMNAND_CS0_BASE]		=  0x40,
	[BRCMNAND_CS1_BASE]		=  0xd0,
	[BRCMNAND_CORR_THRESHOLD]	=  0x84,
	[BRCMNAND_CORR_THRESHOLD_EXT]	=     0,
	[BRCMNAND_UNCORR_COUNT]		=     0,
	[BRCMNAND_CORR_COUNT]		=     0,
	[BRCMNAND_CORR_EXT_ADDR]	=  0x70,
	[BRCMNAND_CORR_ADDR]		=  0x74,
	[BRCMNAND_UNCORR_EXT_ADDR]	=  0x78,
	[BRCMNAND_UNCORR_ADDR]		=  0x7c,
	[BRCMNAND_SEMAPHORE]		=  0x58,
	[BRCMNAND_ID]			=  0x60,
	[BRCMNAND_ID_EXT]		=  0x64,
	[BRCMNAND_LL_RDATA]		= 0x17c,
	[BRCMNAND_OOB_READ_BASE]	=  0x20,
	[BRCMNAND_OOB_READ_10_BASE]	= 0x130,
	[BRCMNAND_OOB_WRITE_BASE]	=  0x30,
	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
	[BRCMNAND_FC_BASE]		= 0x200,
};

/* BRCMNAND v5.0 */
static const u16 brcmnand_regs_v50[] = {
	[BRCMNAND_CMD_START]		=  0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
	[BRCMNAND_INTFC_STATUS]		=  0x6c,
	[BRCMNAND_CS_SELECT]		=  0x14,
	[BRCMNAND_CS_XOR]		=  0x18,
	[BRCMNAND_LL_OP]		= 0x178,
	[BRCMNAND_CS0_BASE]		=  0x40,
	[BRCMNAND_CS1_BASE]		=  0xd0,
	[BRCMNAND_CORR_THRESHOLD]	=  0x84,
	[BRCMNAND_CORR_THRESHOLD_EXT]	=     0,
	[BRCMNAND_UNCORR_COUNT]		=     0,
	[BRCMNAND_CORR_COUNT]		=     0,
	[BRCMNAND_CORR_EXT_ADDR]	=  0x70,
	[BRCMNAND_CORR_ADDR]		=  0x74,
	[BRCMNAND_UNCORR_EXT_ADDR]	=  0x78,
	[BRCMNAND_UNCORR_ADDR]		=  0x7c,
	[BRCMNAND_SEMAPHORE]		=  0x58,
	[BRCMNAND_ID]			=  0x60,
	[BRCMNAND_ID_EXT]		=  0x64,
	[BRCMNAND_LL_RDATA]		= 0x17c,
	[BRCMNAND_OOB_READ_BASE]	=  0x20,
	[BRCMNAND_OOB_READ_10_BASE]	= 0x130,
	[BRCMNAND_OOB_WRITE_BASE]	=  0x30,
	[BRCMNAND_OOB_WRITE_10_BASE]	= 0x140,
	[BRCMNAND_FC_BASE]		= 0x200,
};

/* BRCMNAND v6.0 - v7.1 */
static const u16 brcmnand_regs_v60[] = {
	[BRCMNAND_CMD_START]		=  0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
	[BRCMNAND_INTFC_STATUS]		=  0x14,
	[BRCMNAND_CS_SELECT]		=  0x18,
	[BRCMNAND_CS_XOR]		=  0x1c,
	[BRCMNAND_LL_OP]		=  0x20,
	[BRCMNAND_CS0_BASE]		=  0x50,
	[BRCMNAND_CS1_BASE]		=     0,
	[BRCMNAND_CORR_THRESHOLD]	=  0xc0,
	[BRCMNAND_CORR_THRESHOLD_EXT]	=  0xc4,
	[BRCMNAND_UNCORR_COUNT]		=  0xfc,
	[BRCMNAND_CORR_COUNT]		= 0x100,
	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
	[BRCMNAND_CORR_ADDR]		= 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
	[BRCMNAND_UNCORR_ADDR]		= 0x118,
	[BRCMNAND_SEMAPHORE]		= 0x150,
	[BRCMNAND_ID]			= 0x194,
	[BRCMNAND_ID_EXT]		= 0x198,
	[BRCMNAND_LL_RDATA]		= 0x19c,
	[BRCMNAND_OOB_READ_BASE]	= 0x200,
	[BRCMNAND_OOB_READ_10_BASE]	=     0,
	[BRCMNAND_OOB_WRITE_BASE]	= 0x280,
	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
	[BRCMNAND_FC_BASE]		= 0x400,
};

/* BRCMNAND v7.1 */
static const u16 brcmnand_regs_v71[] = {
	[BRCMNAND_CMD_START]		=  0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
	[BRCMNAND_INTFC_STATUS]		=  0x14,
	[BRCMNAND_CS_SELECT]		=  0x18,
	[BRCMNAND_CS_XOR]		=  0x1c,
	[BRCMNAND_LL_OP]		=  0x20,
	[BRCMNAND_CS0_BASE]		=  0x50,
	[BRCMNAND_CS1_BASE]		=     0,
	[BRCMNAND_CORR_THRESHOLD]	=  0xdc,
	[BRCMNAND_CORR_THRESHOLD_EXT]	=  0xe0,
	[BRCMNAND_UNCORR_COUNT]		=  0xfc,
	[BRCMNAND_CORR_COUNT]		= 0x100,
	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
	[BRCMNAND_CORR_ADDR]		= 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
	[BRCMNAND_UNCORR_ADDR]		= 0x118,
	[BRCMNAND_SEMAPHORE]		= 0x150,
	[BRCMNAND_ID]			= 0x194,
	[BRCMNAND_ID_EXT]		= 0x198,
	[BRCMNAND_LL_RDATA]		= 0x19c,
	[BRCMNAND_OOB_READ_BASE]	= 0x200,
	[BRCMNAND_OOB_READ_10_BASE]	=     0,
	[BRCMNAND_OOB_WRITE_BASE]	= 0x280,
	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
	[BRCMNAND_FC_BASE]		= 0x400,
};

/* BRCMNAND v7.2 */
static const u16 brcmnand_regs_v72[] = {
	[BRCMNAND_CMD_START]		=  0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
	[BRCMNAND_INTFC_STATUS]		=  0x14,
	[BRCMNAND_CS_SELECT]		=  0x18,
	[BRCMNAND_CS_XOR]		=  0x1c,
	[BRCMNAND_LL_OP]		=  0x20,
	[BRCMNAND_CS0_BASE]		=  0x50,
	[BRCMNAND_CS1_BASE]		=     0,
	[BRCMNAND_CORR_THRESHOLD]	=  0xdc,
	[BRCMNAND_CORR_THRESHOLD_EXT]	=  0xe0,
	[BRCMNAND_UNCORR_COUNT]		=  0xfc,
	[BRCMNAND_CORR_COUNT]		= 0x100,
	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
	[BRCMNAND_CORR_ADDR]		= 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
	[BRCMNAND_UNCORR_ADDR]		= 0x118,
	[BRCMNAND_SEMAPHORE]		= 0x150,
	[BRCMNAND_ID]			= 0x194,
	[BRCMNAND_ID_EXT]		= 0x198,
	[BRCMNAND_LL_RDATA]		= 0x19c,
	[BRCMNAND_OOB_READ_BASE]	= 0x200,
	[BRCMNAND_OOB_READ_10_BASE]	=     0,
	[BRCMNAND_OOB_WRITE_BASE]	= 0x400,
	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
	[BRCMNAND_FC_BASE]		= 0x600,
};

enum brcmnand_cs_reg {
	BRCMNAND_CS_CFG_EXT = 0,
	BRCMNAND_CS_CFG,
	BRCMNAND_CS_ACC_CONTROL,
	BRCMNAND_CS_TIMING1,
	BRCMNAND_CS_TIMING2,
};

/* Per chip-select offsets for v7.1 */
static const u8 brcmnand_cs_offsets_v71[] = {
	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
	[BRCMNAND_CS_CFG_EXT]		= 0x04,
	[BRCMNAND_CS_CFG]		= 0x08,
	[BRCMNAND_CS_TIMING1]		= 0x0c,
	[BRCMNAND_CS_TIMING2]		= 0x10,
};

/* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
static const u8 brcmnand_cs_offsets[] = {
	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
	[BRCMNAND_CS_CFG_EXT]		= 0x04,
	[BRCMNAND_CS_CFG]		= 0x04,
	[BRCMNAND_CS_TIMING1]		= 0x08,
	[BRCMNAND_CS_TIMING2]		= 0x0c,
};

/* Per chip-select offset for <= v5.0 on CS0 only */
static const u8 brcmnand_cs_offsets_cs0[] = {
	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
	[BRCMNAND_CS_CFG_EXT]		= 0x08,
	[BRCMNAND_CS_CFG]		= 0x08,
	[BRCMNAND_CS_TIMING1]		= 0x10,
	[BRCMNAND_CS_TIMING2]		= 0x14,
};

/*
 * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
 * one config register, but once the bitfields overflowed, newer controllers
 * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
 */
enum {
	CFG_BLK_ADR_BYTES_SHIFT		= 8,
	CFG_COL_ADR_BYTES_SHIFT		= 12,
	CFG_FUL_ADR_BYTES_SHIFT		= 16,
	CFG_BUS_WIDTH_SHIFT		= 23,
	CFG_BUS_WIDTH			= BIT(CFG_BUS_WIDTH_SHIFT),
	CFG_DEVICE_SIZE_SHIFT		= 24,

	/* Only for v2.1 */
	CFG_PAGE_SIZE_SHIFT_v2_1	= 30,

	/* Only for pre-v7.1 (with no CFG_EXT register) */
	CFG_PAGE_SIZE_SHIFT		= 20,
	CFG_BLK_SIZE_SHIFT		= 28,

	/* Only for v7.1+ (with CFG_EXT register) */
	CFG_EXT_PAGE_SIZE_SHIFT		= 0,
	CFG_EXT_BLK_SIZE_SHIFT		= 4,
};
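
/*
 * Worked example (illustrative): pre-v7.1 controllers encode the page and
 * block sizes as table indices at bits 20 (CFG_PAGE_SIZE_SHIFT) and 28
 * (CFG_BLK_SIZE_SHIFT) of the single CONFIG register; v7.1+ keeps the
 * address-byte and device-size fields in CONFIG but moves the page/block
 * size indices into CFG_EXT at bits 0 and 4.
 */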

/* BRCMNAND_INTFC_STATUS */
enum {
	INTFC_FLASH_STATUS		= GENMASK(7, 0),

	INTFC_ERASED			= BIT(27),
	INTFC_OOB_VALID			= BIT(28),
	INTFC_CACHE_VALID		= BIT(29),
	INTFC_FLASH_READY		= BIT(30),
	INTFC_CTLR_READY		= BIT(31),
};

static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
{
#if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA)
	return static_branch_unlikely(&brcmnand_soc_has_ops_key);
#else
	return false;
#endif
}

static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
{
	if (brcmnand_non_mmio_ops(ctrl))
		return brcmnand_soc_read(ctrl->soc, offs);
	return brcmnand_readl(ctrl->nand_base + offs);
}

static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
				 u32 val)
{
	if (brcmnand_non_mmio_ops(ctrl))
		brcmnand_soc_write(ctrl->soc, val, offs);
	else
		brcmnand_writel(val, ctrl->nand_base + offs);
}

static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
{
	static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
	static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
	static const unsigned int block_sizes_v2_2[] = { 16, 128, 8, 512, 256, 0 };
	static const unsigned int block_sizes_v2_1[] = { 16, 128, 8, 512, 0 };
	static const unsigned int page_sizes_v3_4[] = { 512, 2048, 4096, 8192, 0 };
	static const unsigned int page_sizes_v2_2[] = { 512, 2048, 4096, 0 };
	static const unsigned int page_sizes_v2_1[] = { 512, 2048, 0 };

	ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;

	/* Only support v2.1+ */
	if (ctrl->nand_version < 0x0201) {
		dev_err(ctrl->dev, "version %#x not supported\n",
			ctrl->nand_version);
		return -ENODEV;
	}

	/* Register offsets */
	if (ctrl->nand_version >= 0x0702)
		ctrl->reg_offsets = brcmnand_regs_v72;
	else if (ctrl->nand_version == 0x0701)
		ctrl->reg_offsets = brcmnand_regs_v71;
	else if (ctrl->nand_version >= 0x0600)
		ctrl->reg_offsets = brcmnand_regs_v60;
	else if (ctrl->nand_version >= 0x0500)
		ctrl->reg_offsets = brcmnand_regs_v50;
	else if (ctrl->nand_version >= 0x0303)
		ctrl->reg_offsets = brcmnand_regs_v33;
	else if (ctrl->nand_version >= 0x0201)
		ctrl->reg_offsets = brcmnand_regs_v21;

	/* Chip-select stride */
	if (ctrl->nand_version >= 0x0701)
		ctrl->reg_spacing = 0x14;
	else
		ctrl->reg_spacing = 0x10;

	/* Per chip-select registers */
	if (ctrl->nand_version >= 0x0701) {
		ctrl->cs_offsets = brcmnand_cs_offsets_v71;
	} else {
		ctrl->cs_offsets = brcmnand_cs_offsets;

		/* v3.3-5.0 have a different CS0 offset layout */
		if (ctrl->nand_version >= 0x0303 &&
		    ctrl->nand_version <= 0x0500)
			ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
	}

	/* Page / block sizes */
	if (ctrl->nand_version >= 0x0701) {
		/* >= v7.1 use nice power-of-2 values! */
		ctrl->max_page_size = 16 * 1024;
		ctrl->max_block_size = 2 * 1024 * 1024;
	} else {
		if (ctrl->nand_version >= 0x0304)
			ctrl->page_sizes = page_sizes_v3_4;
		else if (ctrl->nand_version >= 0x0202)
			ctrl->page_sizes = page_sizes_v2_2;
		else
			ctrl->page_sizes = page_sizes_v2_1;

		if (ctrl->nand_version >= 0x0202)
			ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT;
		else
			ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT_v2_1;

		if (ctrl->nand_version >= 0x0600)
			ctrl->block_sizes = block_sizes_v6;
		else if (ctrl->nand_version >= 0x0400)
			ctrl->block_sizes = block_sizes_v4;
		else if (ctrl->nand_version >= 0x0202)
			ctrl->block_sizes = block_sizes_v2_2;
		else
			ctrl->block_sizes = block_sizes_v2_1;

		if (ctrl->nand_version < 0x0400) {
			if (ctrl->nand_version < 0x0202)
				ctrl->max_page_size = 2048;
			else
				ctrl->max_page_size = 4096;
			ctrl->max_block_size = 512 * 1024;
		}
	}

	/* Maximum spare area sector size (per 512B) */
	if (ctrl->nand_version == 0x0702)
		ctrl->max_oob = 128;
	else if (ctrl->nand_version >= 0x0600)
		ctrl->max_oob = 64;
	else if (ctrl->nand_version >= 0x0500)
		ctrl->max_oob = 32;
	else
		ctrl->max_oob = 16;

	/* v6.0 and newer (except v6.1) have prefetch support */
	if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
		ctrl->features |= BRCMNAND_HAS_PREFETCH;

	/*
	 * v6.x has cache mode, but it's implemented differently. Ignore it for
	 * now.
	 */
	if (ctrl->nand_version >= 0x0700)
		ctrl->features |= BRCMNAND_HAS_CACHE_MODE;

	if (ctrl->nand_version >= 0x0500)
		ctrl->features |= BRCMNAND_HAS_1K_SECTORS;

	if (ctrl->nand_version >= 0x0700)
		ctrl->features |= BRCMNAND_HAS_WP;
	else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
		ctrl->features |= BRCMNAND_HAS_WP;

	return 0;
}

static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl)
{
	/* flash_dma register offsets */
	if (ctrl->nand_version >= 0x0703)
		ctrl->flash_dma_offsets = flash_dma_regs_v4;
	else if (ctrl->nand_version == 0x0602)
		ctrl->flash_dma_offsets = flash_dma_regs_v0;
	else
		ctrl->flash_dma_offsets = flash_dma_regs_v1;
}

static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
		enum brcmnand_reg reg)
{
	u16 offs = ctrl->reg_offsets[reg];

	if (offs)
		return nand_readreg(ctrl, offs);
	else
		return 0;
}

static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
				      enum brcmnand_reg reg, u32 val)
{
	u16 offs = ctrl->reg_offsets[reg];

	if (offs)
		nand_writereg(ctrl, offs, val);
}

static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
				    enum brcmnand_reg reg, u32 mask, unsigned
				    int shift, u32 val)
{
	u32 tmp = brcmnand_read_reg(ctrl, reg);

	tmp &= ~mask;
	tmp |= val << shift;
	brcmnand_write_reg(ctrl, reg, tmp);
}

static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
{
	if (brcmnand_non_mmio_ops(ctrl))
		return brcmnand_soc_read(ctrl->soc, BRCMNAND_NON_MMIO_FC_ADDR);
	return __raw_readl(ctrl->nand_fc + word * 4);
}

static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
				     int word, u32 val)
{
	if (brcmnand_non_mmio_ops(ctrl))
		brcmnand_soc_write(ctrl->soc, val, BRCMNAND_NON_MMIO_FC_ADDR);
	else
		__raw_writel(val, ctrl->nand_fc + word * 4);
}

static inline void edu_writel(struct brcmnand_controller *ctrl,
			      enum edu_reg reg, u32 val)
{
	u16 offs = ctrl->edu_offsets[reg];

	brcmnand_writel(val, ctrl->edu_base + offs);
}

static inline u32 edu_readl(struct brcmnand_controller *ctrl,
			    enum edu_reg reg)
{
	u16 offs = ctrl->edu_offsets[reg];

	return brcmnand_readl(ctrl->edu_base + offs);
}

static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
{
	/* Clear error addresses */
	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
}

static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl)
{
	u64 err_addr;

	err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR);
	err_addr |= ((u64)(brcmnand_read_reg(ctrl,
					     BRCMNAND_UNCORR_EXT_ADDR)
					     & 0xffff) << 32);

	return err_addr;
}

static u64 brcmnand_get_correcc_addr(struct brcmnand_controller *ctrl)
{
	u64 err_addr;

	err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR);
	err_addr |= ((u64)(brcmnand_read_reg(ctrl,
					     BRCMNAND_CORR_EXT_ADDR)
					     & 0xffff) << 32);

	return err_addr;
}

static void brcmnand_set_cmd_addr(struct mtd_info *mtd, u64 addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;

	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
			   (host->cs << 16) | ((addr >> 32) & 0xffff));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
	brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
			   lower_32_bits(addr));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
}
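
/*
 * Worked example (illustrative): cs == 1 and addr == 0x123456789ULL yield
 * CMD_EXT_ADDRESS == (1 << 16) | 0x1 == 0x10001 and
 * CMD_ADDRESS == 0x23456789.
 */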

static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
				     enum brcmnand_cs_reg reg)
{
	u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
	u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
	u8 cs_offs;

	if (cs == 0 && ctrl->cs0_offsets)
		cs_offs = ctrl->cs0_offsets[reg];
	else
		cs_offs = ctrl->cs_offsets[reg];

	if (cs && offs_cs1)
		return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;

	return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
}
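
/*
 * Worked example (illustrative): on v7.1 (CS1_BASE == 0, reg_spacing ==
 * 0x14, ACC_CONTROL offset 0x00), cs == 2 resolves to
 * 0x50 + 2 * 0x14 == 0x78.
 */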

static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version < 0x0600)
		return 1;
	return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
}

static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	unsigned int shift = 0, bits;
	enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
	int cs = host->cs;

	if (!ctrl->reg_offsets[reg])
		return;

	if (ctrl->nand_version == 0x0702)
		bits = 7;
	else if (ctrl->nand_version >= 0x0600)
		bits = 6;
	else if (ctrl->nand_version >= 0x0500)
		bits = 5;
	else
		bits = 4;

	if (ctrl->nand_version >= 0x0702) {
		if (cs >= 4)
			reg = BRCMNAND_CORR_THRESHOLD_EXT;
		shift = (cs % 4) * bits;
	} else if (ctrl->nand_version >= 0x0600) {
		if (cs >= 5)
			reg = BRCMNAND_CORR_THRESHOLD_EXT;
		shift = (cs % 5) * bits;
	}
	brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val);
}

static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
{
	/* Kludge for the BCMA-based NAND controller which does not actually
	 * shift the command
	 */
	if (ctrl->nand_version == 0x0304 && brcmnand_non_mmio_ops(ctrl))
		return 0;

	if (ctrl->nand_version < 0x0602)
		return 24;
	return 0;
}
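
/*
 * Example (illustrative): apart from the BCMA special case above,
 * controllers older than v6.2 take the opcode shifted, e.g. CMD_PAGE_READ
 * (0x01) is written to CMD_START as 0x01 << 24 == 0x01000000; v6.2 and
 * newer take it unshifted.
 */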

/***********************************************************************
 * NAND ACC CONTROL bitfield
 *
 * Some bits have remained constant across hardware revisions, while
 * others have shifted around.
 ***********************************************************************/

/* Constant for all versions (where supported) */
enum {
	/* See BRCMNAND_HAS_CACHE_MODE */
	ACC_CONTROL_CACHE_MODE				= BIT(22),

	/* See BRCMNAND_HAS_PREFETCH */
	ACC_CONTROL_PREFETCH				= BIT(23),

	ACC_CONTROL_PAGE_HIT				= BIT(24),
	ACC_CONTROL_WR_PREEMPT				= BIT(25),
	ACC_CONTROL_PARTIAL_PAGE			= BIT(26),
	ACC_CONTROL_RD_ERASED				= BIT(27),
	ACC_CONTROL_FAST_PGM_RDIN			= BIT(28),
	ACC_CONTROL_WR_ECC				= BIT(30),
	ACC_CONTROL_RD_ECC				= BIT(31),
};

static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version == 0x0702)
		return GENMASK(7, 0);
	else if (ctrl->nand_version >= 0x0600)
		return GENMASK(6, 0);
	else if (ctrl->nand_version >= 0x0303)
		return GENMASK(5, 0);
	else
		return GENMASK(4, 0);
}

#define NAND_ACC_CONTROL_ECC_SHIFT	16
#define NAND_ACC_CONTROL_ECC_EXT_SHIFT	13

static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
{
	u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;

	mask <<= NAND_ACC_CONTROL_ECC_SHIFT;

	/* v7.2 includes additional ECC levels */
	if (ctrl->nand_version >= 0x0702)
		mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;

	return mask;
}
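
/*
 * Worked example (illustrative): on v7.2 the mask is
 * (0x1f << 16) | (0x7 << 13) == 0x1fe000, covering both the base and
 * extended ECC level fields.
 */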

static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
	u32 acc_control = nand_readreg(ctrl, offs);
	u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC;

	if (en) {
		acc_control |= ecc_flags; /* enable RD/WR ECC */
		acc_control |= host->hwcfg.ecc_level
			       << NAND_ACC_CONTROL_ECC_SHIFT;
	} else {
		acc_control &= ~ecc_flags; /* disable RD/WR ECC */
		acc_control &= ~brcmnand_ecc_level_mask(ctrl);
	}

	nand_writereg(ctrl, offs, acc_control);
}

static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version >= 0x0702)
		return 9;
	else if (ctrl->nand_version >= 0x0600)
		return 7;
	else if (ctrl->nand_version >= 0x0500)
		return 6;
	else
		return -1;
}

static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int shift = brcmnand_sector_1k_shift(ctrl);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
						  BRCMNAND_CS_ACC_CONTROL);

	if (shift < 0)
		return 0;

	return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
}

static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int shift = brcmnand_sector_1k_shift(ctrl);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
						  BRCMNAND_CS_ACC_CONTROL);
	u32 tmp;

	if (shift < 0)
		return;

	tmp = nand_readreg(ctrl, acc_control_offs);
	tmp &= ~(1 << shift);
	tmp |= (!!val) << shift;
	nand_writereg(ctrl, acc_control_offs, tmp);
}

/***********************************************************************
 * CS_NAND_SELECT
 ***********************************************************************/

enum {
	CS_SELECT_NAND_WP			= BIT(29),
	CS_SELECT_AUTO_DEVICE_ID_CFG		= BIT(30),
};

static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
				    u32 mask, u32 expected_val,
				    unsigned long timeout_ms)
{
	unsigned long limit;
	u32 val;

	if (!timeout_ms)
		timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;

	limit = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
		if ((val & mask) == expected_val)
			return 0;

		cpu_relax();
	} while (time_after(limit, jiffies));

	dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
		 expected_val, val & mask);

	return -ETIMEDOUT;
}

static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
{
	u32 val = en ? CS_SELECT_NAND_WP : 0;

	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
}

/***********************************************************************
 * Flash DMA
 ***********************************************************************/

static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
{
	return ctrl->flash_dma_base;
}

static inline bool has_edu(struct brcmnand_controller *ctrl)
{
	return ctrl->edu_base;
}

static inline bool use_dma(struct brcmnand_controller *ctrl)
{
	return has_flash_dma(ctrl) || has_edu(ctrl);
}

static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
{
	if (ctrl->pio_poll_mode)
		return;

	if (has_flash_dma(ctrl)) {
		ctrl->flash_dma_base = NULL;
		disable_irq(ctrl->dma_irq);
	}

	disable_irq(ctrl->irq);
	ctrl->pio_poll_mode = true;
}

static inline bool flash_dma_buf_ok(const void *buf)
{
	return buf && !is_vmalloc_addr(buf) &&
		likely(IS_ALIGNED((uintptr_t)buf, 4));
}

static inline void flash_dma_writel(struct brcmnand_controller *ctrl,
				    enum flash_dma_reg dma_reg, u32 val)
{
	u16 offs = ctrl->flash_dma_offsets[dma_reg];

	brcmnand_writel(val, ctrl->flash_dma_base + offs);
}

static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl,
				  enum flash_dma_reg dma_reg)
{
	u16 offs = ctrl->flash_dma_offsets[dma_reg];

	return brcmnand_readl(ctrl->flash_dma_base + offs);
}

/* Low-level operation types: command, address, write, or read */
enum brcmnand_llop_type {
	LL_OP_CMD,
	LL_OP_ADDR,
	LL_OP_WR,
	LL_OP_RD,
};

/***********************************************************************
 * Internal support functions
 ***********************************************************************/

static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
				  struct brcmnand_cfg *cfg)
{
	if (ctrl->nand_version <= 0x0701)
		return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
			cfg->ecc_level == 15;
	else
		return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 &&
			cfg->ecc_level == 15) ||
			(cfg->spare_area_size == 28 && cfg->ecc_level == 16));
}

/*
 * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given
 * the layout/configuration.
 * Returns -ERRCODE on failure.
 */
static int brcmnand_hamming_ooblayout_ecc(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors)
		return -ERANGE;

	oobregion->offset = (section * sas) + 6;
	oobregion->length = 3;

	return 0;
}

static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
					   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
	u32 next;

	if (section > sectors)
		return -ERANGE;

	next = (section * sas);
	if (section < sectors)
		next += 6;

	if (section) {
		oobregion->offset = ((section - 1) * sas) + 9;
	} else {
		if (cfg->page_size > 512) {
			/* Large page NAND uses first 2 bytes for BBI */
			oobregion->offset = 2;
		} else {
			/* Small page NAND uses last byte before ECC for BBI */
			oobregion->offset = 0;
			next--;
		}
	}

	oobregion->length = next - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops = {
	.ecc = brcmnand_hamming_ooblayout_ecc,
	.free = brcmnand_hamming_ooblayout_free,
};

static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors)
		return -ERANGE;

	oobregion->offset = ((section + 1) * sas) - chip->ecc.bytes;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int brcmnand_bch_ooblayout_free_lp(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors)
		return -ERANGE;

	if (sas <= chip->ecc.bytes)
		return 0;

	oobregion->offset = section * sas;
	oobregion->length = sas - chip->ecc.bytes;

	if (!section) {
		oobregion->offset++;
		oobregion->length--;
	}

	return 0;
}

static int brcmnand_bch_ooblayout_free_sp(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;

	if (section > 1 || sas - chip->ecc.bytes < 6 ||
	    (section && sas - chip->ecc.bytes == 6))
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		oobregion->length = 5;
	} else {
		oobregion->offset = 6;
		oobregion->length = sas - chip->ecc.bytes - 6;
	}

	return 0;
}

static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops = {
	.ecc = brcmnand_bch_ooblayout_ecc,
	.free = brcmnand_bch_ooblayout_free_lp,
};

static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops = {
	.ecc = brcmnand_bch_ooblayout_ecc,
	.free = brcmnand_bch_ooblayout_free_sp,
};

static int brcmstb_choose_ecc_layout(struct brcmnand_host *host)
{
	struct brcmnand_cfg *p = &host->hwcfg;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	struct nand_ecc_ctrl *ecc = &host->chip.ecc;
	unsigned int ecc_level = p->ecc_level;
	int sas = p->spare_area_size << p->sector_size_1k;
	int sectors = p->page_size / (512 << p->sector_size_1k);

	if (p->sector_size_1k)
		ecc_level <<= 1;

	if (is_hamming_ecc(host->ctrl, p)) {
		ecc->bytes = 3 * sectors;
		mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops);
		return 0;
	}

	/*
	 * CONTROLLER_VERSION:
	 *   < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
	 *  >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
	 * But we will just be conservative.
	 */
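	/*
	 * Worked example (illustrative): BCH-4 gives
	 * DIV_ROUND_UP(4 * 14, 8) == 7 ECC bytes per 512B sector.
	 */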
	ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8);
	if (p->page_size == 512)
		mtd_set_ooblayout(mtd, &brcmnand_bch_sp_ooblayout_ops);
	else
		mtd_set_ooblayout(mtd, &brcmnand_bch_lp_ooblayout_ops);

	if (ecc->bytes >= sas) {
		dev_err(&host->pdev->dev,
			"error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
			ecc->bytes, sas);
		return -EINVAL;
	}

	return 0;
}

static void brcmnand_wp(struct mtd_info *mtd, int wp)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;

	if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
		static int old_wp = -1;
		int ret;

		if (old_wp != wp) {
			dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
			old_wp = wp;
		}

		/*
		 * make sure ctrl/flash ready before and after
		 * changing state of #WP pin
		 */
		ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
					       NAND_STATUS_READY,
					       NAND_CTRL_RDY |
					       NAND_STATUS_READY, 0);
		if (ret)
			return;

		brcmnand_set_wp(ctrl, wp);
		nand_status_op(chip, NULL);
		/* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
		ret = bcmnand_ctrl_poll_status(ctrl,
					       NAND_CTRL_RDY |
					       NAND_STATUS_READY |
					       NAND_STATUS_WP,
					       NAND_CTRL_RDY |
					       NAND_STATUS_READY |
					       (wp ? 0 : NAND_STATUS_WP), 0);

		if (ret)
			dev_err_ratelimited(&host->pdev->dev,
					    "nand #WP expected %s\n",
					    wp ? "on" : "off");
	}
}

/* Helper functions for reading and writing OOB registers */
static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
{
	u16 offset0, offset10, reg_offs;

	offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
	offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE];

	if (offs >= ctrl->max_oob)
		return 0x77;

	if (offs >= 16 && offset10)
		reg_offs = offset10 + ((offs - 0x10) & ~0x03);
	else
		reg_offs = offset0 + (offs & ~0x03);

	return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
}
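
/*
 * Worked example (illustrative): offs == 5 reads the 32-bit OOB word at
 * offset0 + 4 and returns bits 23:16, i.e. OOB bytes are packed big-endian
 * within each register word, matching write_oob_to_regs() below.
 */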

static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs,
				 u32 data)
{
	u16 offset0, offset10, reg_offs;

	offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE];
	offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE];

	if (offs >= ctrl->max_oob)
		return;

	if (offs >= 16 && offset10)
		reg_offs = offset10 + ((offs - 0x10) & ~0x03);
	else
		reg_offs = offset0 + (offs & ~0x03);

	nand_writereg(ctrl, reg_offs, data);
}

/*
 * read_oob_from_regs - read data from OOB registers
 * @ctrl: NAND controller
 * @i: sub-page sector index
 * @oob: buffer to read to
 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
 */
static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob,
			      int sas, int sector_1k)
{
	int tbytes = sas << sector_1k;
	int j;

	/* Adjust OOB values for 1K sector size */
	if (sector_1k && (i & 0x01))
		tbytes = max(0, tbytes - (int)ctrl->max_oob);
	tbytes = min_t(int, tbytes, ctrl->max_oob);

	for (j = 0; j < tbytes; j++)
		oob[j] = oob_reg_read(ctrl, j);
	return tbytes;
}
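
/*
 * Note (illustrative): with 1KiB sectors the spare area spans two 512B
 * flash-cache transfers, so for an odd sub-page index the copy resumes
 * past the first max_oob bytes; each transfer is capped at max_oob.
 */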

/*
 * write_oob_to_regs - write data to OOB registers
 * @ctrl: NAND controller
 * @i: sub-page sector index
 * @oob: buffer to write from
 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
 */
static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
			     const u8 *oob, int sas, int sector_1k)
{
	int tbytes = sas << sector_1k;
	int j;

	/* Adjust OOB values for 1K sector size */
	if (sector_1k && (i & 0x01))
		tbytes = max(0, tbytes - (int)ctrl->max_oob);
	tbytes = min_t(int, tbytes, ctrl->max_oob);

	for (j = 0; j < tbytes; j += 4)
		oob_reg_write(ctrl, j,
				(oob[j + 0] << 24) |
				(oob[j + 1] << 16) |
				(oob[j + 2] <<  8) |
				(oob[j + 3] <<  0));
	return tbytes;
}

static void brcmnand_edu_init(struct brcmnand_controller *ctrl)
{
	/* initialize edu */
	edu_writel(ctrl, EDU_ERR_STATUS, 0);
	edu_readl(ctrl, EDU_ERR_STATUS);
	edu_writel(ctrl, EDU_DONE, 0);
	edu_writel(ctrl, EDU_DONE, 0);
	edu_writel(ctrl, EDU_DONE, 0);
	edu_writel(ctrl, EDU_DONE, 0);
	edu_readl(ctrl, EDU_DONE);
}

/* edu irq */
static irqreturn_t brcmnand_edu_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	if (ctrl->edu_count) {
		ctrl->edu_count--;
		while (!(edu_readl(ctrl, EDU_DONE) & EDU_DONE_MASK))
			udelay(1);
		edu_writel(ctrl, EDU_DONE, 0);
		edu_readl(ctrl, EDU_DONE);
	}

	if (ctrl->edu_count) {
		ctrl->edu_dram_addr += FC_BYTES;
		ctrl->edu_ext_addr += FC_BYTES;

		edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
		edu_readl(ctrl, EDU_DRAM_ADDR);
		edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
		edu_readl(ctrl, EDU_EXT_ADDR);

		if (ctrl->oob) {
			if (ctrl->edu_cmd == EDU_CMD_READ) {
				ctrl->oob += read_oob_from_regs(ctrl,
							ctrl->edu_count + 1,
							ctrl->oob, ctrl->sas,
							ctrl->sector_size_1k);
			} else {
				brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
						   ctrl->edu_ext_addr);
				brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
				ctrl->oob += write_oob_to_regs(ctrl,
							       ctrl->edu_count,
							       ctrl->oob, ctrl->sas,
							       ctrl->sector_size_1k);
			}
		}

		mb(); /* flush previous writes */
		edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
		edu_readl(ctrl, EDU_CMD);

		return IRQ_HANDLED;
	}

	complete(&ctrl->edu_done);

	return IRQ_HANDLED;
}
static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	/* Discard all NAND_CTLRDY interrupts during DMA */
	if (ctrl->dma_pending)
		return IRQ_HANDLED;

	/* check if we need to piggyback on the ctrlrdy irq */
	if (ctrl->edu_pending) {
		/* Discard interrupts while using the dedicated edu irq */
		if (irq == ctrl->irq && ((int)ctrl->edu_irq >= 0))
			return IRQ_HANDLED;

		/* no registered edu irq, call handler */
		return brcmnand_edu_irq(irq, data);
	}

	complete(&ctrl->done);
	return IRQ_HANDLED;
}

/* Handle SoC-specific interrupt hardware */
static irqreturn_t brcmnand_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	if (ctrl->soc->ctlrdy_ack(ctrl->soc))
		return brcmnand_ctlrdy_irq(irq, data);

	return IRQ_NONE;
}

static irqreturn_t brcmnand_dma_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	complete(&ctrl->dma_done);

	return IRQ_HANDLED;
}

static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int ret;
	u64 cmd_addr;

	cmd_addr = brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);

	dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);

	BUG_ON(ctrl->cmd_pending != 0);
	ctrl->cmd_pending = cmd;

	ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
	WARN_ON(ret);

	mb(); /* flush previous writes */
	brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
			   cmd << brcmnand_cmd_shift(ctrl));
}
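
/*
 * Note (illustrative): brcmnand_send_cmd() only kicks off the operation;
 * callers pair it with brcmnand_waitfunc() below, which waits for the
 * CTLR_READY completion and returns the INTFC flash status byte.
 */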
   1605
   1606/***********************************************************************
   1607 * NAND MTD API: read/program/erase
   1608 ***********************************************************************/
   1609
   1610static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat,
   1611			      unsigned int ctrl)
   1612{
   1613	/* intentionally left blank */
   1614}
   1615
   1616static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
   1617{
   1618	struct brcmnand_host *host = nand_get_controller_data(chip);
   1619	struct brcmnand_controller *ctrl = host->ctrl;
   1620	struct mtd_info *mtd = nand_to_mtd(chip);
   1621	bool err = false;
   1622	int sts;
   1623
   1624	if (mtd->oops_panic_write || ctrl->irq < 0) {
   1625		/* switch to interrupt polling and PIO mode */
   1626		disable_ctrl_irqs(ctrl);
   1627		sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
   1628					       NAND_CTRL_RDY, 0);
   1629		err = (sts < 0) ? true : false;
   1630	} else {
   1631		unsigned long timeo = msecs_to_jiffies(
   1632						NAND_POLL_STATUS_TIMEOUT_MS);
   1633		/* wait for completion interrupt */
   1634		sts = wait_for_completion_timeout(&ctrl->done, timeo);
   1635		err = (sts <= 0) ? true : false;
   1636	}
   1637
   1638	return err;
   1639}
   1640
   1641static int brcmnand_waitfunc(struct nand_chip *chip)
   1642{
   1643	struct brcmnand_host *host = nand_get_controller_data(chip);
   1644	struct brcmnand_controller *ctrl = host->ctrl;
   1645	bool err = false;
   1646
   1647	dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
   1648	if (ctrl->cmd_pending)
   1649		err = brcmstb_nand_wait_for_completion(chip);
   1650
   1651	if (err) {
   1652		u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
   1653					>> brcmnand_cmd_shift(ctrl);
   1654
   1655		dev_err_ratelimited(ctrl->dev,
   1656			"timeout waiting for command %#02x\n", cmd);
   1657		dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
   1658			brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
   1659	}
   1660	ctrl->cmd_pending = 0;
   1661	return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
   1662				 INTFC_FLASH_STATUS;
   1663}
   1664
   1665enum {
   1666	LLOP_RE				= BIT(16),
   1667	LLOP_WE				= BIT(17),
   1668	LLOP_ALE			= BIT(18),
   1669	LLOP_CLE			= BIT(19),
   1670	LLOP_RETURN_IDLE		= BIT(31),
   1671
   1672	LLOP_DATA_MASK			= GENMASK(15, 0),
   1673};
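
       /*
        * Illustration: issuing a raw command byte through the LL_OP register
        * (see brcmnand_low_level_op() below), e.g. 0xEF for SET_FEATURES,
        * composes (0xEF & LLOP_DATA_MASK) | LLOP_WE | LLOP_CLE, i.e. the
        * data byte in bits 15:0 with the write-enable and command-latch
        * strobes set.
        */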
   1674
   1675static int brcmnand_low_level_op(struct brcmnand_host *host,
   1676				 enum brcmnand_llop_type type, u32 data,
   1677				 bool last_op)
   1678{
   1679	struct nand_chip *chip = &host->chip;
   1680	struct brcmnand_controller *ctrl = host->ctrl;
   1681	u32 tmp;
   1682
   1683	tmp = data & LLOP_DATA_MASK;
   1684	switch (type) {
   1685	case LL_OP_CMD:
   1686		tmp |= LLOP_WE | LLOP_CLE;
   1687		break;
   1688	case LL_OP_ADDR:
   1689		/* WE | ALE */
   1690		tmp |= LLOP_WE | LLOP_ALE;
   1691		break;
   1692	case LL_OP_WR:
   1693		/* WE */
   1694		tmp |= LLOP_WE;
   1695		break;
   1696	case LL_OP_RD:
   1697		/* RE */
   1698		tmp |= LLOP_RE;
   1699		break;
   1700	}
   1701	if (last_op)
   1702		/* RETURN_IDLE */
   1703		tmp |= LLOP_RETURN_IDLE;
   1704
   1705	dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);
   1706
   1707	brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
   1708	(void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);
   1709
   1710	brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP);
   1711	return brcmnand_waitfunc(chip);
   1712}
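
       /*
        * For example, brcmnand_cmdfunc() and brcmnand_write_buf() below
        * drive a SET_FEATURES sequence entirely through this helper:
        * LL_OP_CMD with the command byte, LL_OP_ADDR with the feature
        * address, then one LL_OP_WR per parameter byte, with last_op set
        * on the final byte to return the interface to idle.
        */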
   1713
   1714static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command,
   1715			     int column, int page_addr)
   1716{
   1717	struct mtd_info *mtd = nand_to_mtd(chip);
   1718	struct brcmnand_host *host = nand_get_controller_data(chip);
   1719	struct brcmnand_controller *ctrl = host->ctrl;
   1720	u64 addr = (u64)page_addr << chip->page_shift;
   1721	int native_cmd = 0;
   1722
   1723	if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
   1724			command == NAND_CMD_RNDOUT)
   1725		addr = (u64)column;
   1726	/* Avoid propagating a negative, don't-care address */
   1727	else if (page_addr < 0)
   1728		addr = 0;
   1729
   1730	dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
   1731		(unsigned long long)addr);
   1732
   1733	host->last_cmd = command;
   1734	host->last_byte = 0;
   1735	host->last_addr = addr;
   1736
   1737	switch (command) {
   1738	case NAND_CMD_RESET:
   1739		native_cmd = CMD_FLASH_RESET;
   1740		break;
   1741	case NAND_CMD_STATUS:
   1742		native_cmd = CMD_STATUS_READ;
   1743		break;
   1744	case NAND_CMD_READID:
   1745		native_cmd = CMD_DEVICE_ID_READ;
   1746		break;
   1747	case NAND_CMD_READOOB:
   1748		native_cmd = CMD_SPARE_AREA_READ;
   1749		break;
   1750	case NAND_CMD_ERASE1:
   1751		native_cmd = CMD_BLOCK_ERASE;
   1752		brcmnand_wp(mtd, 0);
   1753		break;
   1754	case NAND_CMD_PARAM:
   1755		native_cmd = CMD_PARAMETER_READ;
   1756		break;
   1757	case NAND_CMD_SET_FEATURES:
   1758	case NAND_CMD_GET_FEATURES:
   1759		brcmnand_low_level_op(host, LL_OP_CMD, command, false);
   1760		brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
   1761		break;
   1762	case NAND_CMD_RNDOUT:
   1763		native_cmd = CMD_PARAMETER_CHANGE_COL;
   1764		addr &= ~((u64)(FC_BYTES - 1));
   1765		/*
   1766		 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
   1767		 * NB: hwcfg.sector_size_1k may not be initialized yet
   1768		 */
   1769		if (brcmnand_get_sector_size_1k(host)) {
   1770			host->hwcfg.sector_size_1k =
   1771				brcmnand_get_sector_size_1k(host);
   1772			brcmnand_set_sector_size_1k(host, 0);
   1773		}
   1774		break;
   1775	}
   1776
   1777	if (!native_cmd)
   1778		return;
   1779
   1780	brcmnand_set_cmd_addr(mtd, addr);
   1781	brcmnand_send_cmd(host, native_cmd);
   1782	brcmnand_waitfunc(chip);
   1783
   1784	if (native_cmd == CMD_PARAMETER_READ ||
   1785			native_cmd == CMD_PARAMETER_CHANGE_COL) {
   1786		/* Copy flash cache word-wise */
   1787		u32 *flash_cache = (u32 *)ctrl->flash_cache;
   1788		int i;
   1789
   1790		brcmnand_soc_data_bus_prepare(ctrl->soc, true);
   1791
   1792		/*
   1793		 * Must cache the FLASH_CACHE now, since changes in
   1794		 * SECTOR_SIZE_1K may invalidate it
   1795		 */
   1796		for (i = 0; i < FC_WORDS; i++)
   1797			/*
   1798			 * Flash cache is big endian for parameter pages, at
   1799			 * least on STB SoCs
   1800			 */
   1801			flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));
   1802
   1803		brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
   1804
   1805		/* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
   1806		if (host->hwcfg.sector_size_1k)
   1807			brcmnand_set_sector_size_1k(host,
   1808						    host->hwcfg.sector_size_1k);
   1809	}
   1810
   1811	/* Re-enabling write protection is only necessary after an erase */
   1812	if (command == NAND_CMD_ERASE1)
   1813		brcmnand_wp(mtd, 1);
   1814}
   1815
   1816static uint8_t brcmnand_read_byte(struct nand_chip *chip)
   1817{
   1818	struct brcmnand_host *host = nand_get_controller_data(chip);
   1819	struct brcmnand_controller *ctrl = host->ctrl;
   1820	uint8_t ret = 0;
   1821	int addr, offs;
   1822
   1823	switch (host->last_cmd) {
   1824	case NAND_CMD_READID:
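       		/*
       		 * ID bytes are packed big-endian across BRCMNAND_ID and
       		 * BRCMNAND_ID_EXT: e.g. byte 0 is ID >> 24 and byte 4 is
       		 * ID_EXT >> (56 - (4 << 3)) = ID_EXT >> 24.
       		 */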
   1825		if (host->last_byte < 4)
   1826			ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
   1827				(24 - (host->last_byte << 3));
   1828		else if (host->last_byte < 8)
   1829			ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
   1830				(56 - (host->last_byte << 3));
   1831		break;
   1832
   1833	case NAND_CMD_READOOB:
   1834		ret = oob_reg_read(ctrl, host->last_byte);
   1835		break;
   1836
   1837	case NAND_CMD_STATUS:
   1838		ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
   1839					INTFC_FLASH_STATUS;
   1840		if (wp_on) /* hide WP status */
   1841			ret |= NAND_STATUS_WP;
   1842		break;
   1843
   1844	case NAND_CMD_PARAM:
   1845	case NAND_CMD_RNDOUT:
   1846		addr = host->last_addr + host->last_byte;
   1847		offs = addr & (FC_BYTES - 1);
   1848
   1849		/* At FC_BYTES boundary, switch to next column */
   1850		if (host->last_byte > 0 && offs == 0)
   1851			nand_change_read_column_op(chip, addr, NULL, 0, false);
   1852
   1853		ret = ctrl->flash_cache[offs];
   1854		break;
   1855	case NAND_CMD_GET_FEATURES:
   1856		if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
   1857			ret = 0;
   1858		} else {
   1859			bool last = host->last_byte ==
   1860				ONFI_SUBFEATURE_PARAM_LEN - 1;
   1861			brcmnand_low_level_op(host, LL_OP_RD, 0, last);
   1862			ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
   1863		}
   1864	}
   1865
   1866	dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
   1867	host->last_byte++;
   1868
   1869	return ret;
   1870}
   1871
   1872static void brcmnand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
   1873{
   1874	int i;
   1875
   1876	for (i = 0; i < len; i++, buf++)
   1877		*buf = brcmnand_read_byte(chip);
   1878}
   1879
   1880static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
   1881			       int len)
   1882{
   1883	int i;
   1884	struct brcmnand_host *host = nand_get_controller_data(chip);
   1885
   1886	switch (host->last_cmd) {
   1887	case NAND_CMD_SET_FEATURES:
   1888		for (i = 0; i < len; i++)
   1889			brcmnand_low_level_op(host, LL_OP_WR, buf[i],
   1890						  (i + 1) == len);
   1891		break;
   1892	default:
   1893		BUG();
   1894		break;
   1895	}
   1896}
   1897
   1898/*
   1899 *  Kick EDU engine
   1900 */
   1901static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
   1902			      u8 *oob, u32 len, u8 cmd)
   1903{
   1904	struct brcmnand_controller *ctrl = host->ctrl;
   1905	struct brcmnand_cfg *cfg = &host->hwcfg;
   1906	unsigned long timeo = msecs_to_jiffies(200);
   1907	int ret = 0;
   1908	int dir = (cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
   1909	u8 edu_cmd = (cmd == CMD_PAGE_READ ? EDU_CMD_READ : EDU_CMD_WRITE);
   1910	unsigned int trans = len >> FC_SHIFT;
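       	/* e.g. a 2KiB page is 2048 >> 9 = 4 flash-cache (512B) transfers */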
   1911	dma_addr_t pa;
   1912
   1913	dev_dbg(ctrl->dev, "EDU %s %p:%p\n", ((edu_cmd == EDU_CMD_READ) ?
   1914					      "read" : "write"), buf, oob);
   1915
   1916	pa = dma_map_single(ctrl->dev, buf, len, dir);
   1917	if (dma_mapping_error(ctrl->dev, pa)) {
   1918		dev_err(ctrl->dev, "unable to map buffer for EDU DMA\n");
   1919		return -ENOMEM;
   1920	}
   1921
   1922	ctrl->edu_pending = true;
   1923	ctrl->edu_dram_addr = pa;
   1924	ctrl->edu_ext_addr = addr;
   1925	ctrl->edu_cmd = edu_cmd;
   1926	ctrl->edu_count = trans;
   1927	ctrl->sas = cfg->spare_area_size;
   1928	ctrl->oob = oob;
   1929
   1930	edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
   1931	edu_readl(ctrl,  EDU_DRAM_ADDR);
   1932	edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
   1933	edu_readl(ctrl, EDU_EXT_ADDR);
   1934	edu_writel(ctrl, EDU_LENGTH, FC_BYTES);
   1935	edu_readl(ctrl, EDU_LENGTH);
   1936
   1937	if (ctrl->oob && (ctrl->edu_cmd == EDU_CMD_WRITE)) {
   1938		brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
   1939				   ctrl->edu_ext_addr);
   1940		brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
   1941		ctrl->oob += write_oob_to_regs(ctrl,
   1942					       1,
   1943					       ctrl->oob, ctrl->sas,
   1944					       ctrl->sector_size_1k);
   1945	}
   1946
   1947	/* Start edu engine */
   1948	mb(); /* flush previous writes */
   1949	edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
   1950	edu_readl(ctrl, EDU_CMD);
   1951
   1952	if (wait_for_completion_timeout(&ctrl->edu_done, timeo) <= 0) {
   1953		dev_err(ctrl->dev,
   1954			"timeout waiting for EDU; status %#x, error status %#x\n",
   1955			edu_readl(ctrl, EDU_STATUS),
   1956			edu_readl(ctrl, EDU_ERR_STATUS));
   1957	}
   1958
   1959	dma_unmap_single(ctrl->dev, pa, len, dir);
   1960
   1961	/* read last subpage oob */
   1962	if (ctrl->oob && (ctrl->edu_cmd == EDU_CMD_READ)) {
   1963		ctrl->oob += read_oob_from_regs(ctrl,
   1964						1,
   1965						ctrl->oob, ctrl->sas,
   1966						ctrl->sector_size_1k);
   1967	}
   1968
   1969	/* for program page check NAND status */
   1970	if (((brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
   1971	      INTFC_FLASH_STATUS) & NAND_STATUS_FAIL) &&
   1972	    edu_cmd == EDU_CMD_WRITE) {
   1973		dev_info(ctrl->dev, "program failed at %llx\n",
   1974			 (unsigned long long)addr);
   1975		ret = -EIO;
   1976	}
   1977
   1978	/* Make sure the EDU status is clean */
   1979	if (edu_readl(ctrl, EDU_STATUS) & EDU_STATUS_ACTIVE)
   1980		dev_warn(ctrl->dev, "EDU still active: %#x\n",
   1981			 edu_readl(ctrl, EDU_STATUS));
   1982
   1983	if (unlikely(edu_readl(ctrl, EDU_ERR_STATUS) & EDU_ERR_STATUS_ERRACK)) {
   1984		dev_warn(ctrl->dev, "EDU RBUS error at addr %llx\n",
   1985			 (unsigned long long)addr);
   1986		ret = -EIO;
   1987	}
   1988
   1989	ctrl->edu_pending = false;
   1990	brcmnand_edu_init(ctrl);
   1991	edu_writel(ctrl, EDU_STOP, 0); /* force stop */
   1992	edu_readl(ctrl, EDU_STOP);
   1993
   1994	if (!ret && edu_cmd == EDU_CMD_READ) {
   1995		u64 err_addr = 0;
   1996
   1997		/*
   1998		 * check for ECC errors here, subpage ECC errors are
   1999		 * retained in ECC error address register
   2000		 */
   2001		err_addr = brcmnand_get_uncorrecc_addr(ctrl);
   2002		if (!err_addr) {
   2003			err_addr = brcmnand_get_correcc_addr(ctrl);
   2004			if (err_addr)
   2005				ret = -EUCLEAN;
   2006		} else
   2007			ret = -EBADMSG;
   2008	}
   2009
   2010	return ret;
   2011}
   2012
   2013/*
   2014 * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
   2015 * following ahead of time:
   2016 *  - Is this descriptor the beginning or end of a linked list?
   2017 *  - What is the (DMA) address of the next descriptor in the linked list?
   2018 */
   2019static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
   2020				  struct brcm_nand_dma_desc *desc, u64 addr,
   2021				  dma_addr_t buf, u32 len, u8 dma_cmd,
   2022				  bool begin, bool end,
   2023				  dma_addr_t next_desc)
   2024{
   2025	memset(desc, 0, sizeof(*desc));
   2026	/* Descriptors are written in native byte order (wordwise) */
   2027	desc->next_desc = lower_32_bits(next_desc);
   2028	desc->next_desc_ext = upper_32_bits(next_desc);
   2029	desc->cmd_irq = (dma_cmd << 24) |
   2030		(end ? (0x03 << 8) : 0) | /* IRQ | STOP */
   2031		(!!begin) | ((!!end) << 1); /* head, tail */
   2032#ifdef CONFIG_CPU_BIG_ENDIAN
   2033	desc->cmd_irq |= 0x01 << 12;
   2034#endif
   2035	desc->dram_addr = lower_32_bits(buf);
   2036	desc->dram_addr_ext = upper_32_bits(buf);
   2037	desc->tfr_len = len;
   2038	desc->total_len = len;
   2039	desc->flash_addr = lower_32_bits(addr);
   2040	desc->flash_addr_ext = upper_32_bits(addr);
   2041	desc->cs = host->cs;
   2042	desc->status_valid = 0x01;
   2043	return 0;
   2044}
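
       /*
        * For illustration: a single-descriptor page read (begin = end = true,
        * dma_cmd = CMD_PAGE_READ) packs cmd_irq as
        * (0x01 << 24) | (0x03 << 8) | 0x3 = 0x01000303 on little-endian
        * hosts: the command in bits 31:24, IRQ and STOP in bits 9:8, and the
        * head/tail bits marking it both first and last in the list.
        */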
   2045
   2046/*
   2047 * Kick the FLASH_DMA engine, with a given DMA descriptor
   2048 */
   2049static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
   2050{
   2051	struct brcmnand_controller *ctrl = host->ctrl;
   2052	unsigned long timeo = msecs_to_jiffies(100);
   2053
   2054	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
   2055	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
   2056	if (ctrl->nand_version > 0x0602) {
   2057		flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT,
   2058				 upper_32_bits(desc));
   2059		(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
   2060	}
   2061
   2062	/* Start FLASH_DMA engine */
   2063	ctrl->dma_pending = true;
   2064	mb(); /* flush previous writes */
   2065	flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */
   2066
   2067	if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
   2068		dev_err(ctrl->dev,
   2069				"timeout waiting for DMA; status %#x, error status %#x\n",
   2070				flash_dma_readl(ctrl, FLASH_DMA_STATUS),
   2071				flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
   2072	}
   2073	ctrl->dma_pending = false;
   2074	flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
   2075}
   2076
   2077static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
   2078			      u8 *oob, u32 len, u8 dma_cmd)
   2079{
   2080	struct brcmnand_controller *ctrl = host->ctrl;
   2081	dma_addr_t buf_pa;
   2082	int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
   2083
   2084	buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
   2085	if (dma_mapping_error(ctrl->dev, buf_pa)) {
   2086		dev_err(ctrl->dev, "unable to map buffer for DMA\n");
   2087		return -ENOMEM;
   2088	}
   2089
   2090	brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
   2091				   dma_cmd, true, true, 0);
   2092
   2093	brcmnand_dma_run(host, ctrl->dma_pa);
   2094
   2095	dma_unmap_single(ctrl->dev, buf_pa, len, dir);
   2096
   2097	if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
   2098		return -EBADMSG;
   2099	else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
   2100		return -EUCLEAN;
   2101
   2102	return 0;
   2103}
   2104
   2105/*
   2106 * Assumes proper CS is already set
   2107 */
   2108static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
   2109				u64 addr, unsigned int trans, u32 *buf,
   2110				u8 *oob, u64 *err_addr)
   2111{
   2112	struct brcmnand_host *host = nand_get_controller_data(chip);
   2113	struct brcmnand_controller *ctrl = host->ctrl;
   2114	int i, j, ret = 0;
   2115
   2116	brcmnand_clear_ecc_addr(ctrl);
   2117
   2118	for (i = 0; i < trans; i++, addr += FC_BYTES) {
   2119		brcmnand_set_cmd_addr(mtd, addr);
   2120		/* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
   2121		brcmnand_send_cmd(host, CMD_PAGE_READ);
   2122		brcmnand_waitfunc(chip);
   2123
   2124		if (likely(buf)) {
   2125			brcmnand_soc_data_bus_prepare(ctrl->soc, false);
   2126
   2127			for (j = 0; j < FC_WORDS; j++, buf++)
   2128				*buf = brcmnand_read_fc(ctrl, j);
   2129
   2130			brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
   2131		}
   2132
   2133		if (oob)
   2134			oob += read_oob_from_regs(ctrl, i, oob,
   2135					mtd->oobsize / trans,
   2136					host->hwcfg.sector_size_1k);
   2137
   2138		if (ret != -EBADMSG) {
   2139			*err_addr = brcmnand_get_uncorrecc_addr(ctrl);
   2140
   2141			if (*err_addr)
   2142				ret = -EBADMSG;
   2143		}
   2144
   2145		if (!ret) {
   2146			*err_addr = brcmnand_get_correcc_addr(ctrl);
   2147
   2148			if (*err_addr)
   2149				ret = -EUCLEAN;
   2150		}
   2151	}
   2152
   2153	return ret;
   2154}
   2155
   2156/*
   2157 * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
   2158 * error
   2159 *
   2160 * Because the HW ECC signals an ECC error if an erased page has even a single
   2161 * bitflip, we must check each ECC error to see if it is actually an erased
   2162 * page with bitflips, not a truly corrupted page.
   2163 *
   2164 * On a real error, return a negative error code (-EBADMSG for ECC error), and
   2165 * buf will contain raw data.
   2166 * Otherwise, buf gets filled with 0xffs and the maximum number of
   2167 * bitflips-per-ECC-sector is returned to the caller.
   2168 *
   2169 */
   2170static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
   2171		  struct nand_chip *chip, void *buf, u64 addr)
   2172{
   2173	struct mtd_oob_region ecc;
   2174	int i;
   2175	int bitflips = 0;
   2176	int page = addr >> chip->page_shift;
   2177	int ret;
   2178	void *ecc_bytes;
   2179	void *ecc_chunk;
   2180
   2181	if (!buf)
   2182		buf = nand_get_data_buf(chip);
   2183
   2184	/* read without ecc for verification */
   2185	ret = chip->ecc.read_page_raw(chip, buf, true, page);
   2186	if (ret)
   2187		return ret;
   2188
   2189	for (i = 0; i < chip->ecc.steps; i++) {
   2190		ecc_chunk = buf + chip->ecc.size * i;
   2191
   2192		mtd_ooblayout_ecc(mtd, i, &ecc);
   2193		ecc_bytes = chip->oob_poi + ecc.offset;
   2194
   2195		ret = nand_check_erased_ecc_chunk(ecc_chunk, chip->ecc.size,
   2196						  ecc_bytes, ecc.length,
   2197						  NULL, 0,
   2198						  chip->ecc.strength);
   2199		if (ret < 0)
   2200			return ret;
   2201
   2202		bitflips = max(bitflips, ret);
   2203	}
   2204
   2205	return bitflips;
   2206}
   2207
   2208static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
   2209			 u64 addr, unsigned int trans, u32 *buf, u8 *oob)
   2210{
   2211	struct brcmnand_host *host = nand_get_controller_data(chip);
   2212	struct brcmnand_controller *ctrl = host->ctrl;
   2213	u64 err_addr = 0;
   2214	int err;
   2215	bool retry = true;
   2216	bool edu_err = false;
   2217
   2218	dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
   2219
   2220try_dmaread:
   2221	brcmnand_clear_ecc_addr(ctrl);
   2222
   2223	if (ctrl->dma_trans && (has_edu(ctrl) || !oob) &&
   2224	    flash_dma_buf_ok(buf)) {
   2225		err = ctrl->dma_trans(host, addr, buf, oob,
   2226				      trans * FC_BYTES,
   2227				      CMD_PAGE_READ);
   2228
   2229		if (err) {
   2230			if (mtd_is_bitflip_or_eccerr(err))
   2231				err_addr = addr;
   2232			else
   2233				return -EIO;
   2234		}
   2235
   2236		if (has_edu(ctrl) && err_addr)
   2237			edu_err = true;
   2238
   2239	} else {
   2240		if (oob)
   2241			memset(oob, 0x99, mtd->oobsize);
   2242
   2243		err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
   2244					       oob, &err_addr);
   2245	}
   2246
   2247	if (mtd_is_eccerr(err)) {
   2248		/*
   2249		 * On controller versions 7.0 and 7.1, a DMA read issued
   2250		 * after a prior PIO read that reported an uncorrectable
   2251		 * error may capture that stale error, which is cleared
   2252		 * only by a subsequent DMA read. Retry once to clear a
   2253		 * possible false error reported for the current DMA
   2254		 * read.
   2255		 */
   2256		if ((ctrl->nand_version == 0x0700) ||
   2257		    (ctrl->nand_version == 0x0701)) {
   2258			if (retry) {
   2259				retry = false;
   2260				goto try_dmaread;
   2261			}
   2262		}
   2263
   2264		/*
   2265		 * Controller version 7.2 has hw encoder to detect erased page
   2266		 * bitflips, apply sw verification for older controllers only
   2267		 */
   2268		if (ctrl->nand_version < 0x0702) {
   2269			err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
   2270							      addr);
   2271			/* erased page bitflips corrected */
   2272			if (err >= 0)
   2273				return err;
   2274		}
   2275
   2276		dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
   2277			(unsigned long long)err_addr);
   2278		mtd->ecc_stats.failed++;
   2279		/* NAND layer expects zero on ECC errors */
   2280		return 0;
   2281	}
   2282
   2283	if (mtd_is_bitflip(err)) {
   2284		unsigned int corrected = brcmnand_count_corrected(ctrl);
   2285
   2286		/* in case of EDU correctable error we read again using PIO */
   2287		if (edu_err)
   2288			err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
   2289						   oob, &err_addr);
   2290
   2291		dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
   2292			(unsigned long long)err_addr);
   2293		mtd->ecc_stats.corrected += corrected;
   2294		/* Always exceed the software-imposed threshold */
   2295		return max(mtd->bitflip_threshold, corrected);
   2296	}
   2297
   2298	return 0;
   2299}
   2300
   2301static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf,
   2302			      int oob_required, int page)
   2303{
   2304	struct mtd_info *mtd = nand_to_mtd(chip);
   2305	struct brcmnand_host *host = nand_get_controller_data(chip);
   2306	u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
   2307
   2308	nand_read_page_op(chip, page, 0, NULL, 0);
   2309
   2310	return brcmnand_read(mtd, chip, host->last_addr,
   2311			mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
   2312}
   2313
   2314static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
   2315				  int oob_required, int page)
   2316{
   2317	struct brcmnand_host *host = nand_get_controller_data(chip);
   2318	struct mtd_info *mtd = nand_to_mtd(chip);
   2319	u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
   2320	int ret;
   2321
   2322	nand_read_page_op(chip, page, 0, NULL, 0);
   2323
   2324	brcmnand_set_ecc_enabled(host, 0);
   2325	ret = brcmnand_read(mtd, chip, host->last_addr,
   2326			mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
   2327	brcmnand_set_ecc_enabled(host, 1);
   2328	return ret;
   2329}
   2330
   2331static int brcmnand_read_oob(struct nand_chip *chip, int page)
   2332{
   2333	struct mtd_info *mtd = nand_to_mtd(chip);
   2334
   2335	return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
   2336			mtd->writesize >> FC_SHIFT,
   2337			NULL, (u8 *)chip->oob_poi);
   2338}
   2339
   2340static int brcmnand_read_oob_raw(struct nand_chip *chip, int page)
   2341{
   2342	struct mtd_info *mtd = nand_to_mtd(chip);
   2343	struct brcmnand_host *host = nand_get_controller_data(chip);
   2344
   2345	brcmnand_set_ecc_enabled(host, 0);
   2346	brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
   2347		mtd->writesize >> FC_SHIFT,
   2348		NULL, (u8 *)chip->oob_poi);
   2349	brcmnand_set_ecc_enabled(host, 1);
   2350	return 0;
   2351}
   2352
   2353static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
   2354			  u64 addr, const u32 *buf, u8 *oob)
   2355{
   2356	struct brcmnand_host *host = nand_get_controller_data(chip);
   2357	struct brcmnand_controller *ctrl = host->ctrl;
   2358	unsigned int i, j, trans = mtd->writesize >> FC_SHIFT;
   2359	int status, ret = 0;
   2360
   2361	dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);
   2362
   2363	if (unlikely((unsigned long)buf & 0x03)) {
   2364		dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
   2365		buf = (u32 *)((unsigned long)buf & ~0x03);
   2366	}
   2367
   2368	brcmnand_wp(mtd, 0);
   2369
   2370	for (i = 0; i < ctrl->max_oob; i += 4)
   2371		oob_reg_write(ctrl, i, 0xffffffff);
   2372
   2373	if (mtd->oops_panic_write)
   2374		/* switch to interrupt polling and PIO mode */
   2375		disable_ctrl_irqs(ctrl);
   2376
   2377	if (use_dma(ctrl) && (has_edu(ctrl) || !oob) && flash_dma_buf_ok(buf)) {
   2378		if (ctrl->dma_trans(host, addr, (u32 *)buf, oob, mtd->writesize,
   2379				    CMD_PROGRAM_PAGE))
   2380			ret = -EIO;
   2382
   2383		goto out;
   2384	}
   2385
   2386	for (i = 0; i < trans; i++, addr += FC_BYTES) {
   2387		/* full address MUST be set before populating FC */
   2388		brcmnand_set_cmd_addr(mtd, addr);
   2389
   2390		if (buf) {
   2391			brcmnand_soc_data_bus_prepare(ctrl->soc, false);
   2392
   2393			for (j = 0; j < FC_WORDS; j++, buf++)
   2394				brcmnand_write_fc(ctrl, j, *buf);
   2395
   2396			brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
   2397		} else if (oob) {
   2398			for (j = 0; j < FC_WORDS; j++)
   2399				brcmnand_write_fc(ctrl, j, 0xffffffff);
   2400		}
   2401
   2402		if (oob) {
   2403			oob += write_oob_to_regs(ctrl, i, oob,
   2404					mtd->oobsize / trans,
   2405					host->hwcfg.sector_size_1k);
   2406		}
   2407
   2408		/* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
   2409		brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
   2410		status = brcmnand_waitfunc(chip);
   2411
   2412		if (status & NAND_STATUS_FAIL) {
   2413			dev_info(ctrl->dev, "program failed at %llx\n",
   2414				(unsigned long long)addr);
   2415			ret = -EIO;
   2416			goto out;
   2417		}
   2418	}
   2419out:
   2420	brcmnand_wp(mtd, 1);
   2421	return ret;
   2422}
   2423
   2424static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf,
   2425			       int oob_required, int page)
   2426{
   2427	struct mtd_info *mtd = nand_to_mtd(chip);
   2428	struct brcmnand_host *host = nand_get_controller_data(chip);
   2429	void *oob = oob_required ? chip->oob_poi : NULL;
   2430
   2431	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
   2432	brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
   2433
   2434	return nand_prog_page_end_op(chip);
   2435}
   2436
   2437static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
   2438				   int oob_required, int page)
   2439{
   2440	struct mtd_info *mtd = nand_to_mtd(chip);
   2441	struct brcmnand_host *host = nand_get_controller_data(chip);
   2442	void *oob = oob_required ? chip->oob_poi : NULL;
   2443
   2444	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
   2445	brcmnand_set_ecc_enabled(host, 0);
   2446	brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
   2447	brcmnand_set_ecc_enabled(host, 1);
   2448
   2449	return nand_prog_page_end_op(chip);
   2450}
   2451
   2452static int brcmnand_write_oob(struct nand_chip *chip, int page)
   2453{
   2454	return brcmnand_write(nand_to_mtd(chip), chip,
   2455			      (u64)page << chip->page_shift, NULL,
   2456			      chip->oob_poi);
   2457}
   2458
   2459static int brcmnand_write_oob_raw(struct nand_chip *chip, int page)
   2460{
   2461	struct mtd_info *mtd = nand_to_mtd(chip);
   2462	struct brcmnand_host *host = nand_get_controller_data(chip);
   2463	int ret;
   2464
   2465	brcmnand_set_ecc_enabled(host, 0);
   2466	ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL,
   2467				 (u8 *)chip->oob_poi);
   2468	brcmnand_set_ecc_enabled(host, 1);
   2469
   2470	return ret;
   2471}
   2472
   2473/***********************************************************************
   2474 * Per-CS setup (1 NAND device)
   2475 ***********************************************************************/
   2476
   2477static int brcmnand_set_cfg(struct brcmnand_host *host,
   2478			    struct brcmnand_cfg *cfg)
   2479{
   2480	struct brcmnand_controller *ctrl = host->ctrl;
   2481	struct nand_chip *chip = &host->chip;
   2482	u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
   2483	u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
   2484			BRCMNAND_CS_CFG_EXT);
   2485	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
   2486			BRCMNAND_CS_ACC_CONTROL);
   2487	u8 block_size = 0, page_size = 0, device_size = 0;
   2488	u32 tmp;
   2489
   2490	if (ctrl->block_sizes) {
   2491		int i, found;
   2492
   2493		for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
   2494			if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
   2495				block_size = i;
   2496				found = 1;
   2497			}
   2498		if (!found) {
   2499			dev_warn(ctrl->dev, "invalid block size %u\n",
   2500					cfg->block_size);
   2501			return -EINVAL;
   2502		}
   2503	} else {
   2504		block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE);
   2505	}
   2506
   2507	if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
   2508				cfg->block_size > ctrl->max_block_size)) {
   2509		dev_warn(ctrl->dev, "invalid block size %u\n",
   2510				cfg->block_size);
   2511		block_size = 0;
   2512	}
   2513
   2514	if (ctrl->page_sizes) {
   2515		int i, found;
   2516
   2517		for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
   2518			if (ctrl->page_sizes[i] == cfg->page_size) {
   2519				page_size = i;
   2520				found = 1;
   2521			}
   2522		if (!found) {
   2523			dev_warn(ctrl->dev, "invalid page size %u\n",
   2524					cfg->page_size);
   2525			return -EINVAL;
   2526		}
   2527	} else {
   2528		page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE);
   2529	}
   2530
   2531	if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
   2532				cfg->page_size > ctrl->max_page_size)) {
   2533		dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
   2534		return -EINVAL;
   2535	}
   2536
   2537	if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) {
   2538		dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
   2539			(unsigned long long)cfg->device_size);
   2540		return -EINVAL;
   2541	}
   2542	device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE);
   2543
   2544	tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) |
   2545		(cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) |
   2546		(cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) |
   2547		(!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
   2548		(device_size << CFG_DEVICE_SIZE_SHIFT);
   2549	if (cfg_offs == cfg_ext_offs) {
   2550		tmp |= (page_size << ctrl->page_size_shift) |
   2551		       (block_size << CFG_BLK_SIZE_SHIFT);
   2552		nand_writereg(ctrl, cfg_offs, tmp);
   2553	} else {
   2554		nand_writereg(ctrl, cfg_offs, tmp);
   2555		tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) |
   2556		      (block_size << CFG_EXT_BLK_SIZE_SHIFT);
   2557		nand_writereg(ctrl, cfg_ext_offs, tmp);
   2558	}
   2559
   2560	tmp = nand_readreg(ctrl, acc_control_offs);
   2561	tmp &= ~brcmnand_ecc_level_mask(ctrl);
   2562	tmp &= ~brcmnand_spare_area_mask(ctrl);
   2563	if (ctrl->nand_version >= 0x0302) {
   2564		tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
   2565		tmp |= cfg->spare_area_size;
   2566	}
   2567	nand_writereg(ctrl, acc_control_offs, tmp);
   2568
   2569	brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);
   2570
   2571	/* threshold = ceil(BCH-level * 0.75) */
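       	/* e.g. BCH-8 gives DIV_ROUND_UP(8 * 3, 4) = 6 */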
   2572	brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4));
   2573
   2574	return 0;
   2575}
   2576
   2577static void brcmnand_print_cfg(struct brcmnand_host *host,
   2578			       char *buf, struct brcmnand_cfg *cfg)
   2579{
   2580	buf += sprintf(buf,
   2581		"%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
   2582		(unsigned long long)cfg->device_size >> 20,
   2583		cfg->block_size >> 10,
   2584		cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size,
   2585		cfg->page_size >= 1024 ? "KiB" : "B",
   2586		cfg->spare_area_size, cfg->device_width);
   2587
   2588	/* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
   2589	if (is_hamming_ecc(host->ctrl, cfg))
   2590		sprintf(buf, ", Hamming ECC");
   2591	else if (cfg->sector_size_1k)
   2592		sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
   2593	else
   2594		sprintf(buf, ", BCH-%u", cfg->ecc_level);
   2595}
   2596
   2597/*
   2598 * Minimum number of bytes to address a page. Calculated as:
   2599 *     roundup(log2(size / page-size) / 8)
   2600 *
   2601 * NB: the following does not "round up" for non-power-of-2 'size'; but this is
   2602 *     OK because many other things will break if 'size' is irregular...
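        *
        * Worked example (hypothetical device): 1GiB of 2KiB pages needs
        * ilog2(2^30) - ilog2(2^11) = 19 page-address bits, so
        * ALIGN(19, 8) >> 3 = 3 block-address bytes.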
   2603 */
   2604static inline int get_blk_adr_bytes(u64 size, u32 writesize)
   2605{
   2606	return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3;
   2607}
   2608
   2609static int brcmnand_setup_dev(struct brcmnand_host *host)
   2610{
   2611	struct mtd_info *mtd = nand_to_mtd(&host->chip);
   2612	struct nand_chip *chip = &host->chip;
   2613	const struct nand_ecc_props *requirements =
   2614		nanddev_get_ecc_requirements(&chip->base);
   2615	struct brcmnand_controller *ctrl = host->ctrl;
   2616	struct brcmnand_cfg *cfg = &host->hwcfg;
   2617	char msg[128];
   2618	u32 offs, tmp, oob_sector;
   2619	int ret;
   2620
   2621	memset(cfg, 0, sizeof(*cfg));
   2622
   2623	ret = of_property_read_u32(nand_get_flash_node(chip),
   2624				   "brcm,nand-oob-sector-size",
   2625				   &oob_sector);
   2626	if (ret) {
   2627		/* Use detected size */
   2628		cfg->spare_area_size = mtd->oobsize /
   2629					(mtd->writesize >> FC_SHIFT);
   2630	} else {
   2631		cfg->spare_area_size = oob_sector;
   2632	}
   2633	if (cfg->spare_area_size > ctrl->max_oob)
   2634		cfg->spare_area_size = ctrl->max_oob;
   2635	/*
   2636	 * Set oobsize to be consistent with controller's spare_area_size, as
   2637	 * the rest is inaccessible.
   2638	 */
   2639	mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
   2640
   2641	cfg->device_size = mtd->size;
   2642	cfg->block_size = mtd->erasesize;
   2643	cfg->page_size = mtd->writesize;
   2644	cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8;
   2645	cfg->col_adr_bytes = 2;
   2646	cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
   2647
   2648	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
   2649		dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
   2650			chip->ecc.engine_type);
   2651		return -EINVAL;
   2652	}
   2653
   2654	if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
   2655		if (chip->ecc.strength == 1 && chip->ecc.size == 512)
   2656			/* Default to Hamming for 1-bit ECC, if unspecified */
   2657			chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
   2658		else
   2659			/* Otherwise, BCH */
   2660			chip->ecc.algo = NAND_ECC_ALGO_BCH;
   2661	}
   2662
   2663	if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING &&
   2664	    (chip->ecc.strength != 1 || chip->ecc.size != 512)) {
   2665		dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
   2666			chip->ecc.strength, chip->ecc.size);
   2667		return -EINVAL;
   2668	}
   2669
   2670	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
   2671	    (!chip->ecc.size || !chip->ecc.strength)) {
   2672		if (requirements->step_size && requirements->strength) {
   2673			/* use detected ECC parameters */
   2674			chip->ecc.size = requirements->step_size;
   2675			chip->ecc.strength = requirements->strength;
   2676			dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n",
   2677				chip->ecc.size, chip->ecc.strength);
   2678		}
   2679	}
   2680
   2681	switch (chip->ecc.size) {
   2682	case 512:
   2683		if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
   2684			cfg->ecc_level = 15;
   2685		else
   2686			cfg->ecc_level = chip->ecc.strength;
   2687		cfg->sector_size_1k = 0;
   2688		break;
   2689	case 1024:
   2690		if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
   2691			dev_err(ctrl->dev, "1KB sectors not supported\n");
   2692			return -EINVAL;
   2693		}
   2694		if (chip->ecc.strength & 0x1) {
   2695			dev_err(ctrl->dev,
   2696				"odd ECC not supported with 1KB sectors\n");
   2697			return -EINVAL;
   2698		}
   2699
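       		/*
       		 * ECC_LEVEL is programmed as strength / 2 for 1KiB sectors
       		 * (brcmnand_print_cfg() doubles it back for display), i.e.
       		 * the level is specified per 512B half-sector.
       		 */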
   2700		cfg->ecc_level = chip->ecc.strength >> 1;
   2701		cfg->sector_size_1k = 1;
   2702		break;
   2703	default:
   2704		dev_err(ctrl->dev, "unsupported ECC size: %d\n",
   2705			chip->ecc.size);
   2706		return -EINVAL;
   2707	}
   2708
   2709	cfg->ful_adr_bytes = cfg->blk_adr_bytes;
   2710	if (mtd->writesize > 512)
   2711		cfg->ful_adr_bytes += cfg->col_adr_bytes;
   2712	else
   2713		cfg->ful_adr_bytes += 1;
   2714
   2715	ret = brcmnand_set_cfg(host, cfg);
   2716	if (ret)
   2717		return ret;
   2718
   2719	brcmnand_set_ecc_enabled(host, 1);
   2720
   2721	brcmnand_print_cfg(host, msg, cfg);
   2722	dev_info(ctrl->dev, "detected %s\n", msg);
   2723
   2724	/* Configure ACC_CONTROL */
   2725	offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
   2726	tmp = nand_readreg(ctrl, offs);
   2727	tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
   2728	tmp &= ~ACC_CONTROL_RD_ERASED;
   2729
   2730	/* We need to turn on reads from erased pages protected by ECC */
   2731	if (ctrl->nand_version >= 0x0702)
   2732		tmp |= ACC_CONTROL_RD_ERASED;
   2733	tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
   2734	if (ctrl->features & BRCMNAND_HAS_PREFETCH)
   2735		tmp &= ~ACC_CONTROL_PREFETCH;
   2736
   2737	nand_writereg(ctrl, offs, tmp);
   2738
   2739	return 0;
   2740}
   2741
   2742static int brcmnand_attach_chip(struct nand_chip *chip)
   2743{
   2744	struct mtd_info *mtd = nand_to_mtd(chip);
   2745	struct brcmnand_host *host = nand_get_controller_data(chip);
   2746	int ret;
   2747
   2748	chip->options |= NAND_NO_SUBPAGE_WRITE;
   2749	/*
   2750	 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
   2751	 * to/from, and have nand_base pass us a bounce buffer instead, as
   2752	 * needed.
   2753	 */
   2754	chip->options |= NAND_USES_DMA;
   2755
   2756	if (chip->bbt_options & NAND_BBT_USE_FLASH)
   2757		chip->bbt_options |= NAND_BBT_NO_OOB;
   2758
   2759	if (brcmnand_setup_dev(host))
   2760		return -ENXIO;
   2761
   2762	chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
   2763
   2764	/* only use our internal HW threshold */
   2765	mtd->bitflip_threshold = 1;
   2766
   2767	ret = brcmstb_choose_ecc_layout(host);
   2768
   2769	/* If OOB is written with ECC enabled it will cause ECC errors */
   2770	if (is_hamming_ecc(host->ctrl, &host->hwcfg)) {
   2771		chip->ecc.write_oob = brcmnand_write_oob_raw;
   2772		chip->ecc.read_oob = brcmnand_read_oob_raw;
   2773	}
   2774
   2775	return ret;
   2776}
   2777
   2778static const struct nand_controller_ops brcmnand_controller_ops = {
   2779	.attach_chip = brcmnand_attach_chip,
   2780};
   2781
   2782static int brcmnand_init_cs(struct brcmnand_host *host,
   2783			    const char * const *part_probe_types)
   2784{
   2785	struct brcmnand_controller *ctrl = host->ctrl;
   2786	struct device *dev = ctrl->dev;
   2787	struct mtd_info *mtd;
   2788	struct nand_chip *chip;
   2789	int ret;
   2790	u16 cfg_offs;
   2791
   2792	mtd = nand_to_mtd(&host->chip);
   2793	chip = &host->chip;
   2794
   2795	nand_set_controller_data(chip, host);
   2796	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "brcmnand.%d",
   2797				   host->cs);
   2798	if (!mtd->name)
   2799		return -ENOMEM;
   2800
   2801	mtd->owner = THIS_MODULE;
   2802	mtd->dev.parent = dev;
   2803
   2804	chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl;
   2805	chip->legacy.cmdfunc = brcmnand_cmdfunc;
   2806	chip->legacy.waitfunc = brcmnand_waitfunc;
   2807	chip->legacy.read_byte = brcmnand_read_byte;
   2808	chip->legacy.read_buf = brcmnand_read_buf;
   2809	chip->legacy.write_buf = brcmnand_write_buf;
   2810
   2811	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
   2812	chip->ecc.read_page = brcmnand_read_page;
   2813	chip->ecc.write_page = brcmnand_write_page;
   2814	chip->ecc.read_page_raw = brcmnand_read_page_raw;
   2815	chip->ecc.write_page_raw = brcmnand_write_page_raw;
   2816	chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
   2817	chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
   2818	chip->ecc.read_oob = brcmnand_read_oob;
   2819	chip->ecc.write_oob = brcmnand_write_oob;
   2820
   2821	chip->controller = &ctrl->controller;
   2822
   2823	/*
   2824	 * The bootloader might have configured 16-bit mode, but the
   2825	 * NAND READID command only works in 8-bit mode. We force
   2826	 * 8-bit mode here to ensure that the NAND READID command works.
   2827	 */
   2828	cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
   2829	nand_writereg(ctrl, cfg_offs,
   2830		      nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
   2831
   2832	ret = nand_scan(chip, 1);
   2833	if (ret)
   2834		return ret;
   2835
   2836	ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
   2837	if (ret)
   2838		nand_cleanup(chip);
   2839
   2840	return ret;
   2841}
   2842
   2843static void brcmnand_save_restore_cs_config(struct brcmnand_host *host,
   2844					    int restore)
   2845{
   2846	struct brcmnand_controller *ctrl = host->ctrl;
   2847	u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
   2848	u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
   2849			BRCMNAND_CS_CFG_EXT);
   2850	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
   2851			BRCMNAND_CS_ACC_CONTROL);
   2852	u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
   2853	u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);
   2854
   2855	if (restore) {
   2856		nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
   2857		if (cfg_offs != cfg_ext_offs)
   2858			nand_writereg(ctrl, cfg_ext_offs,
   2859				      host->hwcfg.config_ext);
   2860		nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
   2861		nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
   2862		nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
   2863	} else {
   2864		host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
   2865		if (cfg_offs != cfg_ext_offs)
   2866			host->hwcfg.config_ext =
   2867				nand_readreg(ctrl, cfg_ext_offs);
   2868		host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
   2869		host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
   2870		host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
   2871	}
   2872}
   2873
   2874static int brcmnand_suspend(struct device *dev)
   2875{
   2876	struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
   2877	struct brcmnand_host *host;
   2878
   2879	list_for_each_entry(host, &ctrl->host_list, node)
   2880		brcmnand_save_restore_cs_config(host, 0);
   2881
   2882	ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
   2883	ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
   2884	ctrl->corr_stat_threshold =
   2885		brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);
   2886
   2887	if (has_flash_dma(ctrl))
   2888		ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
   2889	else if (has_edu(ctrl))
   2890		ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
   2891
   2892	return 0;
   2893}
   2894
   2895static int brcmnand_resume(struct device *dev)
   2896{
   2897	struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
   2898	struct brcmnand_host *host;
   2899
   2900	if (has_flash_dma(ctrl)) {
   2901		flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
   2902		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
   2903	}
   2904
   2905	if (has_edu(ctrl)) {
   2906		ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
   2907		edu_writel(ctrl, EDU_CONFIG, ctrl->edu_config);
   2908		edu_readl(ctrl, EDU_CONFIG);
   2909		brcmnand_edu_init(ctrl);
   2910	}
   2911
   2912	brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
   2913	brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
   2914	brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
   2915			ctrl->corr_stat_threshold);
   2916	if (ctrl->soc) {
   2917		/* Clear/re-enable interrupt */
   2918		ctrl->soc->ctlrdy_ack(ctrl->soc);
   2919		ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
   2920	}
   2921
   2922	list_for_each_entry(host, &ctrl->host_list, node) {
   2923		struct nand_chip *chip = &host->chip;
   2924
   2925		brcmnand_save_restore_cs_config(host, 1);
   2926
   2927		/* Reset the chip, required by some chips after power-up */
   2928		nand_reset_op(chip);
   2929	}
   2930
   2931	return 0;
   2932}
   2933
   2934const struct dev_pm_ops brcmnand_pm_ops = {
   2935	.suspend		= brcmnand_suspend,
   2936	.resume			= brcmnand_resume,
   2937};
   2938EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
   2939
   2940static const struct of_device_id __maybe_unused brcmnand_of_match[] = {
   2941	{ .compatible = "brcm,brcmnand-v2.1" },
   2942	{ .compatible = "brcm,brcmnand-v2.2" },
   2943	{ .compatible = "brcm,brcmnand-v4.0" },
   2944	{ .compatible = "brcm,brcmnand-v5.0" },
   2945	{ .compatible = "brcm,brcmnand-v6.0" },
   2946	{ .compatible = "brcm,brcmnand-v6.1" },
   2947	{ .compatible = "brcm,brcmnand-v6.2" },
   2948	{ .compatible = "brcm,brcmnand-v7.0" },
   2949	{ .compatible = "brcm,brcmnand-v7.1" },
   2950	{ .compatible = "brcm,brcmnand-v7.2" },
   2951	{ .compatible = "brcm,brcmnand-v7.3" },
   2952	{},
   2953};
   2954MODULE_DEVICE_TABLE(of, brcmnand_of_match);
   2955
   2956/***********************************************************************
   2957 * Platform driver setup (per controller)
   2958 ***********************************************************************/
   2959static int brcmnand_edu_setup(struct platform_device *pdev)
   2960{
   2961	struct device *dev = &pdev->dev;
   2962	struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
   2963	struct resource *res;
   2964	int ret;
   2965
   2966	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-edu");
   2967	if (res) {
   2968		ctrl->edu_base = devm_ioremap_resource(dev, res);
   2969		if (IS_ERR(ctrl->edu_base))
   2970			return PTR_ERR(ctrl->edu_base);
   2971
   2972		ctrl->edu_offsets = edu_regs;
   2973
   2974		edu_writel(ctrl, EDU_CONFIG, EDU_CONFIG_MODE_NAND |
   2975			   EDU_CONFIG_SWAP_CFG);
   2976		edu_readl(ctrl, EDU_CONFIG);
   2977
   2978		/* initialize edu */
   2979		brcmnand_edu_init(ctrl);
   2980
   2981		ctrl->edu_irq = platform_get_irq_optional(pdev, 1);
   2982		if (ctrl->edu_irq < 0) {
   2983			dev_warn(dev,
   2984				 "FLASH EDU enabled, using ctlrdy irq\n");
   2985		} else {
   2986			ret = devm_request_irq(dev, ctrl->edu_irq,
   2987					       brcmnand_edu_irq, 0,
   2988					       "brcmnand-edu", ctrl);
   2989			if (ret < 0) {
   2990				dev_err(ctrl->dev, "can't allocate IRQ %d: error %d\n",
   2991					ctrl->edu_irq, ret);
   2992				return ret;
   2993			}
   2994
   2995			dev_info(dev, "FLASH EDU enabled using irq %u\n",
   2996				 ctrl->edu_irq);
   2997		}
   2998	}
   2999
   3000	return 0;
   3001}
   3002
   3003int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
   3004{
   3005	struct brcmnand_platform_data *pd = dev_get_platdata(&pdev->dev);
   3006	struct device *dev = &pdev->dev;
   3007	struct device_node *dn = dev->of_node, *child;
   3008	struct brcmnand_controller *ctrl;
   3009	struct brcmnand_host *host;
   3010	struct resource *res;
   3011	int ret;
   3012
   3013	if (dn && !of_match_node(brcmnand_of_match, dn))
   3014		return -ENODEV;
   3015
   3016	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
   3017	if (!ctrl)
   3018		return -ENOMEM;
   3019
   3020	dev_set_drvdata(dev, ctrl);
   3021	ctrl->dev = dev;
   3022	ctrl->soc = soc;
   3023
   3024	/* Enable the static key if the soc provides I/O operations indicating
   3025	 * that a non-memory mapped IO access path must be used
   3026	 */
   3027	if (brcmnand_soc_has_ops(ctrl->soc))
   3028		static_branch_enable(&brcmnand_soc_has_ops_key);
   3029
   3030	init_completion(&ctrl->done);
   3031	init_completion(&ctrl->dma_done);
   3032	init_completion(&ctrl->edu_done);
   3033	nand_controller_init(&ctrl->controller);
   3034	ctrl->controller.ops = &brcmnand_controller_ops;
   3035	INIT_LIST_HEAD(&ctrl->host_list);
   3036
   3037	/* NAND register range */
   3038	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   3039	ctrl->nand_base = devm_ioremap_resource(dev, res);
   3040	if (IS_ERR(ctrl->nand_base) && !brcmnand_soc_has_ops(soc))
   3041		return PTR_ERR(ctrl->nand_base);
   3042
   3043	/* Enable clock before using NAND registers */
   3044	ctrl->clk = devm_clk_get(dev, "nand");
   3045	if (!IS_ERR(ctrl->clk)) {
   3046		ret = clk_prepare_enable(ctrl->clk);
   3047		if (ret)
   3048			return ret;
   3049	} else {
   3050		ret = PTR_ERR(ctrl->clk);
   3051		if (ret == -EPROBE_DEFER)
   3052			return ret;
   3053
   3054		ctrl->clk = NULL;
   3055	}
   3056
   3057	/* Initialize NAND revision */
   3058	ret = brcmnand_revision_init(ctrl);
   3059	if (ret)
   3060		goto err;
   3061
   3062	/*
   3063	 * Most chips have this cache at a fixed offset within 'nand' block.
   3064	 * Some must specify this region separately.
   3065	 */
   3066	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
   3067	if (res) {
   3068		ctrl->nand_fc = devm_ioremap_resource(dev, res);
   3069		if (IS_ERR(ctrl->nand_fc)) {
   3070			ret = PTR_ERR(ctrl->nand_fc);
   3071			goto err;
   3072		}
   3073	} else {
   3074		ctrl->nand_fc = ctrl->nand_base +
   3075				ctrl->reg_offsets[BRCMNAND_FC_BASE];
   3076	}
   3077
   3078	/* FLASH_DMA */
   3079	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
   3080	if (res) {
   3081		ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
   3082		if (IS_ERR(ctrl->flash_dma_base)) {
   3083			ret = PTR_ERR(ctrl->flash_dma_base);
   3084			goto err;
   3085		}
   3086
   3087		/* initialize the dma version */
   3088		brcmnand_flash_dma_revision_init(ctrl);
   3089
   3090		ret = -EIO;
   3091		if (ctrl->nand_version >= 0x0700)
   3092			ret = dma_set_mask_and_coherent(&pdev->dev,
   3093							DMA_BIT_MASK(40));
   3094		if (ret)
   3095			ret = dma_set_mask_and_coherent(&pdev->dev,
   3096							DMA_BIT_MASK(32));
   3097		if (ret)
   3098			goto err;
   3099
   3100		/* linked-list and stop on error */
   3101		flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
   3102		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
   3103
   3104		/* Allocate descriptor(s) */
   3105		ctrl->dma_desc = dmam_alloc_coherent(dev,
   3106						     sizeof(*ctrl->dma_desc),
   3107						     &ctrl->dma_pa, GFP_KERNEL);
   3108		if (!ctrl->dma_desc) {
   3109			ret = -ENOMEM;
   3110			goto err;
   3111		}
   3112
   3113		ctrl->dma_irq = platform_get_irq(pdev, 1);
   3114		if ((int)ctrl->dma_irq < 0) {
   3115			dev_err(dev, "missing FLASH_DMA IRQ\n");
   3116			ret = -ENODEV;
   3117			goto err;
   3118		}
   3119
   3120		ret = devm_request_irq(dev, ctrl->dma_irq,
   3121				brcmnand_dma_irq, 0, DRV_NAME,
   3122				ctrl);
   3123		if (ret < 0) {
   3124			dev_err(dev, "can't allocate IRQ %d: error %d\n",
   3125					ctrl->dma_irq, ret);
   3126			goto err;
   3127		}
   3128
   3129		dev_info(dev, "enabling FLASH_DMA\n");
   3130		/* set flash dma transfer function to call */
   3131		ctrl->dma_trans = brcmnand_dma_trans;
   3132	} else	{
   3133		ret = brcmnand_edu_setup(pdev);
   3134		if (ret < 0)
   3135			goto err;
   3136
   3137		if (has_edu(ctrl))
   3138			/* set edu transfer function to call */
   3139			ctrl->dma_trans = brcmnand_edu_trans;
   3140	}
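
       	/*
       	 * From here on, brcmnand_read()/brcmnand_write() go through
       	 * ctrl->dma_trans (FLASH_DMA or EDU) whenever the buffer is
       	 * DMA-able, and fall back to PIO otherwise.
       	 */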
   3141
   3142	/* Disable automatic device ID config, direct addressing */
   3143	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
   3144			 CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
   3145	/* Disable XOR addressing */
   3146	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
   3147
   3148	if (ctrl->features & BRCMNAND_HAS_WP) {
   3149		/* Permanently disable write protection */
   3150		if (wp_on == 2)
   3151			brcmnand_set_wp(ctrl, false);
   3152	} else {
   3153		wp_on = 0;
   3154	}
   3155
   3156	/* IRQ */
   3157	ctrl->irq = platform_get_irq_optional(pdev, 0);
   3158	if (ctrl->irq > 0) {
   3159		/*
   3160		 * Some SoCs route this controller's interrupt through extra
   3161		 * SoC-specific logic; use the soc ctlrdy hooks to ack/enable it.
   3162		 */
   3163		if (soc) {
   3164			ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
   3165					       DRV_NAME, ctrl);
   3166
   3167			/* Enable interrupt */
   3168			ctrl->soc->ctlrdy_ack(ctrl->soc);
   3169			ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
   3170		} else {
   3171			/* Use standard interrupt infrastructure */
   3172			ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
   3173					       DRV_NAME, ctrl);
   3174		}
   3175		if (ret < 0) {
   3176			dev_err(dev, "can't allocate IRQ %d: error %d\n",
   3177				ctrl->irq, ret);
   3178			goto err;
   3179		}
   3180	}
   3181
   3182	for_each_available_child_of_node(dn, child) {
   3183		if (of_device_is_compatible(child, "brcm,nandcs")) {
   3184
   3185			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
   3186			if (!host) {
   3187				of_node_put(child);
   3188				ret = -ENOMEM;
   3189				goto err;
   3190			}
   3191			host->pdev = pdev;
   3192			host->ctrl = ctrl;
   3193
   3194			ret = of_property_read_u32(child, "reg", &host->cs);
   3195			if (ret) {
   3196				dev_err(dev, "can't get chip-select\n");
   3197				devm_kfree(dev, host);
   3198				continue;
   3199			}
   3200
   3201			nand_set_flash_node(&host->chip, child);
   3202
   3203			ret = brcmnand_init_cs(host, NULL);
   3204			if (ret) {
   3205				devm_kfree(dev, host);
   3206				continue; /* Try all chip-selects */
   3207			}
   3208
   3209			list_add_tail(&host->node, &ctrl->host_list);
   3210		}
   3211	}
   3212
   3213	if (!list_empty(&ctrl->host_list))
   3214		return 0;
   3215
   3216	if (!pd) {
   3217		ret = -ENODEV;
   3218		goto err;
   3219	}
   3220
   3221	/* If we got here we must have been probing via platform data */
   3222	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
   3223	if (!host) {
   3224		ret = -ENOMEM;
   3225		goto err;
   3226	}
   3227	host->pdev = pdev;
   3228	host->ctrl = ctrl;
   3229	host->cs = pd->chip_select;
   3230	host->chip.ecc.size = pd->ecc_stepsize;
   3231	host->chip.ecc.strength = pd->ecc_strength;
   3232
   3233	ret = brcmnand_init_cs(host, pd->part_probe_types);
   3234	if (ret)
   3235		goto err;
   3236
   3237	list_add_tail(&host->node, &ctrl->host_list);
   3238
   3239	/* No chip-selects could initialize properly */
   3240	if (list_empty(&ctrl->host_list)) {
   3241		ret = -ENODEV;
   3242		goto err;
   3243	}
   3244
   3245	return 0;
   3246
   3247err:
   3248	clk_disable_unprepare(ctrl->clk);
   3249	return ret;
   3251}
   3252EXPORT_SYMBOL_GPL(brcmnand_probe);
   3253
   3254int brcmnand_remove(struct platform_device *pdev)
   3255{
   3256	struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
   3257	struct brcmnand_host *host;
   3258	struct nand_chip *chip;
   3259	int ret;
   3260
   3261	list_for_each_entry(host, &ctrl->host_list, node) {
   3262		chip = &host->chip;
   3263		ret = mtd_device_unregister(nand_to_mtd(chip));
   3264		WARN_ON(ret);
   3265		nand_cleanup(chip);
   3266	}
   3267
   3268	clk_disable_unprepare(ctrl->clk);
   3269
   3270	dev_set_drvdata(&pdev->dev, NULL);
   3271
   3272	return 0;
   3273}
   3274EXPORT_SYMBOL_GPL(brcmnand_remove);
   3275
   3276MODULE_LICENSE("GPL v2");
   3277MODULE_AUTHOR("Kevin Cernekee");
   3278MODULE_AUTHOR("Brian Norris");
   3279MODULE_DESCRIPTION("NAND driver for Broadcom chips");
   3280MODULE_ALIAS("platform:brcmnand");