cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

spi-intel.c (34726B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Intel PCH/PCU SPI flash driver.
      4 *
      5 * Copyright (C) 2016 - 2022, Intel Corporation
      6 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
      7 */
      8
      9#include <linux/iopoll.h>
     10#include <linux/module.h>
     11
     12#include <linux/mtd/partitions.h>
     13#include <linux/mtd/spi-nor.h>
     14
     15#include <linux/spi/flash.h>
     16#include <linux/spi/spi.h>
     17#include <linux/spi/spi-mem.h>
     18
     19#include "spi-intel.h"
     20
     21/* Offsets are from @ispi->base */
     22#define BFPREG				0x00
     23
     24#define HSFSTS_CTL			0x04
     25#define HSFSTS_CTL_FSMIE		BIT(31)
     26#define HSFSTS_CTL_FDBC_SHIFT		24
     27#define HSFSTS_CTL_FDBC_MASK		(0x3f << HSFSTS_CTL_FDBC_SHIFT)
     28
     29#define HSFSTS_CTL_FCYCLE_SHIFT		17
     30#define HSFSTS_CTL_FCYCLE_MASK		(0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
     31/* HW sequencer opcodes */
     32#define HSFSTS_CTL_FCYCLE_READ		(0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
     33#define HSFSTS_CTL_FCYCLE_WRITE		(0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
     34#define HSFSTS_CTL_FCYCLE_ERASE		(0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
     35#define HSFSTS_CTL_FCYCLE_ERASE_64K	(0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
     36#define HSFSTS_CTL_FCYCLE_RDID		(0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
     37#define HSFSTS_CTL_FCYCLE_WRSR		(0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
     38#define HSFSTS_CTL_FCYCLE_RDSR		(0x08 << HSFSTS_CTL_FCYCLE_SHIFT)
     39
     40#define HSFSTS_CTL_FGO			BIT(16)
     41#define HSFSTS_CTL_FLOCKDN		BIT(15)
     42#define HSFSTS_CTL_FDV			BIT(14)
     43#define HSFSTS_CTL_SCIP			BIT(5)
     44#define HSFSTS_CTL_AEL			BIT(2)
     45#define HSFSTS_CTL_FCERR		BIT(1)
     46#define HSFSTS_CTL_FDONE		BIT(0)
     47
     48#define FADDR				0x08
     49#define DLOCK				0x0c
     50#define FDATA(n)			(0x10 + ((n) * 4))
     51
     52#define FRACC				0x50
     53
     54#define FREG(n)				(0x54 + ((n) * 4))
     55#define FREG_BASE_MASK			0x3fff
     56#define FREG_LIMIT_SHIFT		16
     57#define FREG_LIMIT_MASK			(0x03fff << FREG_LIMIT_SHIFT)
     58
     59/* Offset is from @ispi->pregs */
     60#define PR(n)				((n) * 4)
     61#define PR_WPE				BIT(31)
     62#define PR_LIMIT_SHIFT			16
     63#define PR_LIMIT_MASK			(0x3fff << PR_LIMIT_SHIFT)
     64#define PR_RPE				BIT(15)
     65#define PR_BASE_MASK			0x3fff
     66
     67/* Offsets are from @ispi->sregs */
     68#define SSFSTS_CTL			0x00
     69#define SSFSTS_CTL_FSMIE		BIT(23)
     70#define SSFSTS_CTL_DS			BIT(22)
     71#define SSFSTS_CTL_DBC_SHIFT		16
     72#define SSFSTS_CTL_SPOP			BIT(11)
     73#define SSFSTS_CTL_ACS			BIT(10)
     74#define SSFSTS_CTL_SCGO			BIT(9)
     75#define SSFSTS_CTL_COP_SHIFT		12
     76#define SSFSTS_CTL_FRS			BIT(7)
     77#define SSFSTS_CTL_DOFRS		BIT(6)
     78#define SSFSTS_CTL_AEL			BIT(4)
     79#define SSFSTS_CTL_FCERR		BIT(3)
     80#define SSFSTS_CTL_FDONE		BIT(2)
     81#define SSFSTS_CTL_SCIP			BIT(0)
     82
     83#define PREOP_OPTYPE			0x04
     84#define OPMENU0				0x08
     85#define OPMENU1				0x0c
     86
     87#define OPTYPE_READ_NO_ADDR		0
     88#define OPTYPE_WRITE_NO_ADDR		1
     89#define OPTYPE_READ_WITH_ADDR		2
     90#define OPTYPE_WRITE_WITH_ADDR		3
     91
     92/* CPU specifics */
     93#define BYT_PR				0x74
     94#define BYT_SSFSTS_CTL			0x90
     95#define BYT_FREG_NUM			5
     96#define BYT_PR_NUM			5
     97
     98#define LPT_PR				0x74
     99#define LPT_SSFSTS_CTL			0x90
    100#define LPT_FREG_NUM			5
    101#define LPT_PR_NUM			5
    102
    103#define BXT_PR				0x84
    104#define BXT_SSFSTS_CTL			0xa0
    105#define BXT_FREG_NUM			12
    106#define BXT_PR_NUM			6
    107
    108#define CNL_PR				0x84
    109#define CNL_FREG_NUM			6
    110#define CNL_PR_NUM			5
    111
    112#define LVSCC				0xc4
    113#define UVSCC				0xc8
    114#define ERASE_OPCODE_SHIFT		8
    115#define ERASE_OPCODE_MASK		(0xff << ERASE_OPCODE_SHIFT)
    116#define ERASE_64K_OPCODE_SHIFT		16
    117#define ERASE_64K_OPCODE_MASK		(0xff << ERASE_OPCODE_SHIFT)
    118
    119#define INTEL_SPI_TIMEOUT		5000 /* ms */
    120#define INTEL_SPI_FIFO_SZ		64
    121
/**
 * struct intel_spi - Driver private data
 * @dev: Device pointer
 * @info: Pointer to board specific info
 * @base: Beginning of MMIO space
 * @pregs: Start of protection registers
 * @sregs: Start of software sequencer registers
 * @master: Pointer to the SPI controller structure
 * @nregions: Maximum number of regions
 * @pr_num: Maximum number of protected range registers
 * @locked: Is SPI setting locked
 * @swseq_reg: Use SW sequencer in register reads/writes
 * @swseq_erase: Use SW sequencer in erase operation
 * @atomic_preopcode: Holds preopcode when atomic sequence is requested
 * @opcodes: Opcodes which are supported. These are programmed by BIOS
 *           before it locks down the controller.
 * @mem_ops: Pointer to SPI MEM ops supported by the controller
 */
struct intel_spi {
	struct device *dev;
	const struct intel_spi_boardinfo *info;
	void __iomem *base;
	void __iomem *pregs;
	void __iomem *sregs;
	struct spi_controller *master;
	size_t nregions;
	size_t pr_num;
	bool locked;
	bool swseq_reg;
	bool swseq_erase;
	u8 atomic_preopcode;
	u8 opcodes[8];
	const struct intel_spi_mem_op *mem_ops;
};
    156
/**
 * struct intel_spi_mem_op - SPI MEM operation supported by the controller
 * @mem_op: Template describing the SPI MEM operation this entry matches
 * @replacement_op: Controller specific bits OR'd into HSFSTS_CTL when the
 *		    HW sequencer executes the operation (used for erase
 *		    cycle types)
 * @exec_op: Function that executes the matched operation
 */
struct intel_spi_mem_op {
	struct spi_mem_op mem_op;
	u32 replacement_op;
	int (*exec_op)(struct intel_spi *ispi,
		       const struct intel_spi_mem_op *iop,
		       const struct spi_mem_op *op);
};
    164
/* Module parameter gating write access to the flash chip (off by default) */
static bool writeable;
module_param(writeable, bool, 0);
MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");
    168
/*
 * Dump the controller registers for debugging. Raw register contents are
 * logged first, then the protected range and flash region registers are
 * decoded into human readable form. All output goes through dev_dbg() so
 * it is silent unless debugging is enabled.
 */
static void intel_spi_dump_regs(struct intel_spi *ispi)
{
	u32 value;
	int i;

	dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));

	value = readl(ispi->base + HSFSTS_CTL);
	dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
	if (value & HSFSTS_CTL_FLOCKDN)
		dev_dbg(ispi->dev, "-> Locked\n");

	dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
	dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));

	for (i = 0; i < 16; i++)
		dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
			i, readl(ispi->base + FDATA(i)));

	dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));

	for (i = 0; i < ispi->nregions; i++)
		dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
			readl(ispi->base + FREG(i)));
	for (i = 0; i < ispi->pr_num; i++)
		dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
			readl(ispi->pregs + PR(i)));

	/* SW sequencer registers exist only when @sregs is mapped */
	if (ispi->sregs) {
		value = readl(ispi->sregs + SSFSTS_CTL);
		dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
		dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
			readl(ispi->sregs + PREOP_OPTYPE));
		dev_dbg(ispi->dev, "OPMENU0=0x%08x\n",
			readl(ispi->sregs + OPMENU0));
		dev_dbg(ispi->dev, "OPMENU1=0x%08x\n",
			readl(ispi->sregs + OPMENU1));
	}

	dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
	dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));

	dev_dbg(ispi->dev, "Protected regions:\n");
	for (i = 0; i < ispi->pr_num; i++) {
		u32 base, limit;

		value = readl(ispi->pregs + PR(i));
		/* Skip ranges with neither write nor read protection set */
		if (!(value & (PR_WPE | PR_RPE)))
			continue;

		limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
		base = value & PR_BASE_MASK;

		/* Base/limit fields are in 4K units; decode to byte addresses */
		dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
			i, base << 12, (limit << 12) | 0xfff,
			value & PR_WPE ? 'W' : '.', value & PR_RPE ? 'R' : '.');
	}

	dev_dbg(ispi->dev, "Flash regions:\n");
	for (i = 0; i < ispi->nregions; i++) {
		u32 region, base, limit;

		region = readl(ispi->base + FREG(i));
		base = region & FREG_BASE_MASK;
		limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;

		/* A region is disabled when base >= limit (limit 0 for i > 0) */
		if (base >= limit || (i > 0 && limit == 0))
			dev_dbg(ispi->dev, " %02d disabled\n", i);
		else
			dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
				i, base << 12, (limit << 12) | 0xfff);
	}

	dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
		ispi->swseq_reg ? 'S' : 'H');
	dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
		ispi->swseq_erase ? 'S' : 'H');
}
    247
    248/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
    249static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size)
    250{
    251	size_t bytes;
    252	int i = 0;
    253
    254	if (size > INTEL_SPI_FIFO_SZ)
    255		return -EINVAL;
    256
    257	while (size > 0) {
    258		bytes = min_t(size_t, size, 4);
    259		memcpy_fromio(buf, ispi->base + FDATA(i), bytes);
    260		size -= bytes;
    261		buf += bytes;
    262		i++;
    263	}
    264
    265	return 0;
    266}
    267
    268/* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */
    269static int intel_spi_write_block(struct intel_spi *ispi, const void *buf,
    270				 size_t size)
    271{
    272	size_t bytes;
    273	int i = 0;
    274
    275	if (size > INTEL_SPI_FIFO_SZ)
    276		return -EINVAL;
    277
    278	while (size > 0) {
    279		bytes = min_t(size_t, size, 4);
    280		memcpy_toio(ispi->base + FDATA(i), buf, bytes);
    281		size -= bytes;
    282		buf += bytes;
    283		i++;
    284	}
    285
    286	return 0;
    287}
    288
    289static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
    290{
    291	u32 val;
    292
    293	return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
    294				  !(val & HSFSTS_CTL_SCIP), 0,
    295				  INTEL_SPI_TIMEOUT * 1000);
    296}
    297
    298static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
    299{
    300	u32 val;
    301
    302	return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
    303				  !(val & SSFSTS_CTL_SCIP), 0,
    304				  INTEL_SPI_TIMEOUT * 1000);
    305}
    306
    307static bool intel_spi_set_writeable(struct intel_spi *ispi)
    308{
    309	if (!ispi->info->set_writeable)
    310		return false;
    311
    312	return ispi->info->set_writeable(ispi->base, ispi->info->data);
    313}
    314
    315static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
    316{
    317	int i;
    318	int preop;
    319
    320	if (ispi->locked) {
    321		for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
    322			if (ispi->opcodes[i] == opcode)
    323				return i;
    324
    325		return -EINVAL;
    326	}
    327
    328	/* The lock is off, so just use index 0 */
    329	writel(opcode, ispi->sregs + OPMENU0);
    330	preop = readw(ispi->sregs + PREOP_OPTYPE);
    331	writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);
    332
    333	return 0;
    334}
    335
/*
 * Run a register access through the HW sequencer. Only opcodes the HW
 * sequencer implements as dedicated cycle types (RDID, WRSR, RDSR) are
 * supported. Returns 0 on success, negative errno on failure.
 */
static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len)
{
	u32 val, status;
	int ret;

	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);

	/* Map the SPI NOR opcode to the matching HW sequencer cycle type */
	switch (opcode) {
	case SPINOR_OP_RDID:
		val |= HSFSTS_CTL_FCYCLE_RDID;
		break;
	case SPINOR_OP_WRSR:
		val |= HSFSTS_CTL_FCYCLE_WRSR;
		break;
	case SPINOR_OP_RDSR:
		val |= HSFSTS_CTL_FCYCLE_RDSR;
		break;
	default:
		return -EINVAL;
	}

	if (len > INTEL_SPI_FIFO_SZ)
		return -EINVAL;

	/* FDBC holds the byte count minus one */
	val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
	/* Set FCERR/FDONE along with FGO to clear stale status before the cycle */
	val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
	val |= HSFSTS_CTL_FGO;
	writel(val, ispi->base + HSFSTS_CTL);

	ret = intel_spi_wait_hw_busy(ispi);
	if (ret)
		return ret;

	/* Translate the completion status into an errno */
	status = readl(ispi->base + HSFSTS_CTL);
	if (status & HSFSTS_CTL_FCERR)
		return -EIO;
	else if (status & HSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}
    378
/*
 * Run a register access through the SW sequencer using @opcode. @optype
 * is one of the OPTYPE_* constants and @len is the number of data bytes
 * (0 means no data phase). If an atomic preopcode was requested via
 * @atomic_preopcode, the matching preopcode is selected and the atomic
 * sequence enabled for this cycle. Returns 0 on success, negative errno
 * on failure.
 */
static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
			      int optype)
{
	u32 val = 0, status;
	u8 atomic_preopcode;
	int ret;

	/* On success ret is the opmenu slot index for the opcode */
	ret = intel_spi_opcode_index(ispi, opcode, optype);
	if (ret < 0)
		return ret;

	if (len > INTEL_SPI_FIFO_SZ)
		return -EINVAL;

	/*
	 * Always clear it after each SW sequencer operation regardless
	 * of whether it is successful or not.
	 */
	atomic_preopcode = ispi->atomic_preopcode;
	ispi->atomic_preopcode = 0;

	/* Only mark 'Data Cycle' bit when there is data to be transferred */
	if (len > 0)
		val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
	val |= ret << SSFSTS_CTL_COP_SHIFT;
	/* Set FCERR/FDONE along with SCGO to clear stale status */
	val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
	val |= SSFSTS_CTL_SCGO;
	if (atomic_preopcode) {
		u16 preop;

		/* Atomic sequences are only meaningful for write operations */
		switch (optype) {
		case OPTYPE_WRITE_NO_ADDR:
		case OPTYPE_WRITE_WITH_ADDR:
			/* Pick matching preopcode for the atomic sequence */
			preop = readw(ispi->sregs + PREOP_OPTYPE);
			if ((preop & 0xff) == atomic_preopcode)
				; /* Do nothing */
			else if ((preop >> 8) == atomic_preopcode)
				val |= SSFSTS_CTL_SPOP;
			else
				return -EINVAL;

			/* Enable atomic sequence */
			val |= SSFSTS_CTL_ACS;
			break;

		default:
			return -EINVAL;
		}
	}
	writel(val, ispi->sregs + SSFSTS_CTL);

	ret = intel_spi_wait_sw_busy(ispi);
	if (ret)
		return ret;

	/* Translate the completion status into an errno */
	status = readl(ispi->sregs + SSFSTS_CTL);
	if (status & SSFSTS_CTL_FCERR)
		return -EIO;
	else if (status & SSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}
    443
    444static int intel_spi_read_reg(struct intel_spi *ispi,
    445			      const struct intel_spi_mem_op *iop,
    446			      const struct spi_mem_op *op)
    447{
    448	size_t nbytes = op->data.nbytes;
    449	u8 opcode = op->cmd.opcode;
    450	int ret;
    451
    452	/* Address of the first chip */
    453	writel(0, ispi->base + FADDR);
    454
    455	if (ispi->swseq_reg)
    456		ret = intel_spi_sw_cycle(ispi, opcode, nbytes,
    457					 OPTYPE_READ_NO_ADDR);
    458	else
    459		ret = intel_spi_hw_cycle(ispi, opcode, nbytes);
    460
    461	if (ret)
    462		return ret;
    463
    464	return intel_spi_read_block(ispi, op->data.buf.in, nbytes);
    465}
    466
/*
 * Execute a register write. WREN and WRDI are special-cased: WREN is
 * recorded as the preopcode for the next atomic SW sequence, WRDI is a
 * no-op. Everything else fills the FIFO and runs a write cycle.
 */
static int intel_spi_write_reg(struct intel_spi *ispi,
			       const struct intel_spi_mem_op *iop,
			       const struct spi_mem_op *op)
{
	size_t nbytes = op->data.nbytes;
	u8 opcode = op->cmd.opcode;
	int ret;

	/*
	 * This is handled with atomic operation and preop code in Intel
	 * controller so we only verify that it is available. If the
	 * controller is not locked, program the opcode to the PREOP
	 * register for later use.
	 *
	 * When hardware sequencer is used there is no need to program
	 * any opcodes (it handles them automatically as part of a command).
	 */
	if (opcode == SPINOR_OP_WREN) {
		u16 preop;

		if (!ispi->swseq_reg)
			return 0;

		preop = readw(ispi->sregs + PREOP_OPTYPE);
		if ((preop & 0xff) != opcode && (preop >> 8) != opcode) {
			/* A locked controller cannot take a new preopcode */
			if (ispi->locked)
				return -EINVAL;
			writel(opcode, ispi->sregs + PREOP_OPTYPE);
		}

		/*
		 * This enables atomic sequence on next SW cycle. Will
		 * be cleared after next operation.
		 */
		ispi->atomic_preopcode = opcode;
		return 0;
	}

	/*
	 * We hope that HW sequencer will do the right thing automatically and
	 * with the SW sequencer we cannot use preopcode anyway, so just ignore
	 * the Write Disable operation and pretend it was completed
	 * successfully.
	 */
	if (opcode == SPINOR_OP_WRDI)
		return 0;

	/* Address of the first chip */
	writel(0, ispi->base + FADDR);

	/* Write the value beforehand */
	ret = intel_spi_write_block(ispi, op->data.buf.out, nbytes);
	if (ret)
		return ret;

	if (ispi->swseq_reg)
		return intel_spi_sw_cycle(ispi, opcode, nbytes,
					  OPTYPE_WRITE_NO_ADDR);
	return intel_spi_hw_cycle(ispi, opcode, nbytes);
}
    526
/*
 * Read flash contents with the HW sequencer, one FIFO-sized block at a
 * time, never crossing a 4K boundary within a single cycle.
 */
static int intel_spi_read(struct intel_spi *ispi,
			  const struct intel_spi_mem_op *iop,
			  const struct spi_mem_op *op)
{
	void *read_buf = op->data.buf.in;
	size_t block_size, nbytes = op->data.nbytes;
	u32 addr = op->addr.val;
	u32 val, status;
	int ret;

	/*
	 * Atomic sequence is not expected with HW sequencer reads. Make
	 * sure it is cleared regardless.
	 */
	if (WARN_ON_ONCE(ispi->atomic_preopcode))
		ispi->atomic_preopcode = 0;

	while (nbytes > 0) {
		/* One cycle can move at most INTEL_SPI_FIFO_SZ bytes */
		block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);

		/* Read cannot cross 4K boundary */
		block_size = min_t(loff_t, addr + block_size,
				   round_up(addr + 1, SZ_4K)) - addr;

		writel(addr, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		/* Set status bits along with FGO to clear stale state */
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		/* FDBC holds the byte count minus one */
		val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
		val |= HSFSTS_CTL_FCYCLE_READ;
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret)
			return ret;

		/* Translate the completion status into an errno */
		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			ret = -EIO;
		else if (status & HSFSTS_CTL_AEL)
			ret = -EACCES;

		if (ret < 0) {
			dev_err(ispi->dev, "read error: %x: %#x\n", addr, status);
			return ret;
		}

		/* Drain the FIFO into the caller's buffer */
		ret = intel_spi_read_block(ispi, read_buf, block_size);
		if (ret)
			return ret;

		nbytes -= block_size;
		addr += block_size;
		read_buf += block_size;
	}

	return 0;
}
    587
/*
 * Write flash contents with the HW sequencer, one FIFO-sized block at a
 * time, never crossing a 4K boundary within a single cycle. The FIFO is
 * filled before FGO starts the cycle.
 */
static int intel_spi_write(struct intel_spi *ispi,
			   const struct intel_spi_mem_op *iop,
			   const struct spi_mem_op *op)
{
	size_t block_size, nbytes = op->data.nbytes;
	const void *write_buf = op->data.buf.out;
	u32 addr = op->addr.val;
	u32 val, status;
	int ret;

	/* Not needed with HW sequencer write, make sure it is cleared */
	ispi->atomic_preopcode = 0;

	while (nbytes > 0) {
		/* One cycle can move at most INTEL_SPI_FIFO_SZ bytes */
		block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);

		/* Write cannot cross 4K boundary */
		block_size = min_t(loff_t, addr + block_size,
				   round_up(addr + 1, SZ_4K)) - addr;

		writel(addr, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		/* Set status bits along with FGO to clear stale state */
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		/* FDBC holds the byte count minus one */
		val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
		val |= HSFSTS_CTL_FCYCLE_WRITE;

		/* FIFO must contain the data before the cycle is started */
		ret = intel_spi_write_block(ispi, write_buf, block_size);
		if (ret) {
			dev_err(ispi->dev, "failed to write block\n");
			return ret;
		}

		/* Start the write now */
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret) {
			dev_err(ispi->dev, "timeout\n");
			return ret;
		}

		/* Translate the completion status into an errno */
		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			ret = -EIO;
		else if (status & HSFSTS_CTL_AEL)
			ret = -EACCES;

		if (ret < 0) {
			dev_err(ispi->dev, "write error: %x: %#x\n", addr, status);
			return ret;
		}

		nbytes -= block_size;
		addr += block_size;
		write_buf += block_size;
	}

	return 0;
}
    650
/*
 * Erase the sector/block at op->addr.val. Uses the SW sequencer when
 * @swseq_erase is set, otherwise runs a HW sequencer cycle whose
 * FCYCLE bits come from @iop->replacement_op.
 */
static int intel_spi_erase(struct intel_spi *ispi,
			   const struct intel_spi_mem_op *iop,
			   const struct spi_mem_op *op)
{
	u8 opcode = op->cmd.opcode;
	u32 addr = op->addr.val;
	u32 val, status;
	int ret;

	writel(addr, ispi->base + FADDR);

	if (ispi->swseq_erase)
		return intel_spi_sw_cycle(ispi, opcode, 0,
					  OPTYPE_WRITE_WITH_ADDR);

	/* Not needed with HW sequencer erase, make sure it is cleared */
	ispi->atomic_preopcode = 0;

	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
	/* Set status bits along with FGO to clear stale state */
	val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
	val |= HSFSTS_CTL_FGO;
	/* replacement_op supplies the HSFSTS_CTL_FCYCLE_ERASE* bits */
	val |= iop->replacement_op;
	writel(val, ispi->base + HSFSTS_CTL);

	ret = intel_spi_wait_hw_busy(ispi);
	if (ret)
		return ret;

	/* Translate the completion status into an errno */
	status = readl(ispi->base + HSFSTS_CTL);
	if (status & HSFSTS_CTL_FCERR)
		return -EIO;
	if (status & HSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}
    688
    689static bool intel_spi_cmp_mem_op(const struct intel_spi_mem_op *iop,
    690				 const struct spi_mem_op *op)
    691{
    692	if (iop->mem_op.cmd.nbytes != op->cmd.nbytes ||
    693	    iop->mem_op.cmd.buswidth != op->cmd.buswidth ||
    694	    iop->mem_op.cmd.dtr != op->cmd.dtr ||
    695	    iop->mem_op.cmd.opcode != op->cmd.opcode)
    696		return false;
    697
    698	if (iop->mem_op.addr.nbytes != op->addr.nbytes ||
    699	    iop->mem_op.addr.dtr != op->addr.dtr)
    700		return false;
    701
    702	if (iop->mem_op.data.dir != op->data.dir ||
    703	    iop->mem_op.data.dtr != op->data.dtr)
    704		return false;
    705
    706	if (iop->mem_op.data.dir != SPI_MEM_NO_DATA) {
    707		if (iop->mem_op.data.buswidth != op->data.buswidth)
    708			return false;
    709	}
    710
    711	return true;
    712}
    713
    714static const struct intel_spi_mem_op *
    715intel_spi_match_mem_op(struct intel_spi *ispi, const struct spi_mem_op *op)
    716{
    717	const struct intel_spi_mem_op *iop;
    718
    719	for (iop = ispi->mem_ops; iop->mem_op.cmd.opcode; iop++) {
    720		if (intel_spi_cmp_mem_op(iop, op))
    721			break;
    722	}
    723
    724	return iop->mem_op.cmd.opcode ? iop : NULL;
    725}
    726
    727static bool intel_spi_supports_mem_op(struct spi_mem *mem,
    728				      const struct spi_mem_op *op)
    729{
    730	struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
    731	const struct intel_spi_mem_op *iop;
    732
    733	iop = intel_spi_match_mem_op(ispi, op);
    734	if (!iop) {
    735		dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
    736		return false;
    737	}
    738
    739	/*
    740	 * For software sequencer check that the opcode is actually
    741	 * present in the opmenu if it is locked.
    742	 */
    743	if (ispi->swseq_reg && ispi->locked) {
    744		int i;
    745
    746		/* Check if it is in the locked opcodes list */
    747		for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) {
    748			if (ispi->opcodes[i] == op->cmd.opcode)
    749				return true;
    750		}
    751
    752		dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
    753		return false;
    754	}
    755
    756	return true;
    757}
    758
    759static int intel_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
    760{
    761	struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
    762	const struct intel_spi_mem_op *iop;
    763
    764	iop = intel_spi_match_mem_op(ispi, op);
    765	if (!iop)
    766		return -EOPNOTSUPP;
    767
    768	return iop->exec_op(ispi, iop, op);
    769}
    770
    771static const char *intel_spi_get_name(struct spi_mem *mem)
    772{
    773	const struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
    774
    775	/*
    776	 * Return name of the flash controller device to be compatible
    777	 * with the MTD version.
    778	 */
    779	return dev_name(ispi->dev);
    780}
    781
    782static int intel_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
    783{
    784	struct intel_spi *ispi = spi_master_get_devdata(desc->mem->spi->master);
    785	const struct intel_spi_mem_op *iop;
    786
    787	iop = intel_spi_match_mem_op(ispi, &desc->info.op_tmpl);
    788	if (!iop)
    789		return -EOPNOTSUPP;
    790
    791	desc->priv = (void *)iop;
    792	return 0;
    793}
    794
    795static ssize_t intel_spi_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs,
    796				     size_t len, void *buf)
    797{
    798	struct intel_spi *ispi = spi_master_get_devdata(desc->mem->spi->master);
    799	const struct intel_spi_mem_op *iop = desc->priv;
    800	struct spi_mem_op op = desc->info.op_tmpl;
    801	int ret;
    802
    803	/* Fill in the gaps */
    804	op.addr.val = offs;
    805	op.data.nbytes = len;
    806	op.data.buf.in = buf;
    807
    808	ret = iop->exec_op(ispi, iop, &op);
    809	return ret ? ret : len;
    810}
    811
    812static ssize_t intel_spi_dirmap_write(struct spi_mem_dirmap_desc *desc, u64 offs,
    813				      size_t len, const void *buf)
    814{
    815	struct intel_spi *ispi = spi_master_get_devdata(desc->mem->spi->master);
    816	const struct intel_spi_mem_op *iop = desc->priv;
    817	struct spi_mem_op op = desc->info.op_tmpl;
    818	int ret;
    819
    820	op.addr.val = offs;
    821	op.data.nbytes = len;
    822	op.data.buf.out = buf;
    823
    824	ret = iop->exec_op(ispi, iop, &op);
    825	return ret ? ret : len;
    826}
    827
/* SPI MEM callbacks wired into the SPI core */
static const struct spi_controller_mem_ops intel_spi_mem_ops = {
	.supports_op = intel_spi_supports_mem_op,
	.exec_op = intel_spi_exec_mem_op,
	.get_name = intel_spi_get_name,
	.dirmap_create = intel_spi_dirmap_create,
	.dirmap_read = intel_spi_dirmap_read,
	.dirmap_write = intel_spi_dirmap_write,
};
    836
/* Shorthand initializers used to build the intel_spi_mem_op tables below */

/* Address phase with the given number of address bytes */
#define INTEL_SPI_OP_ADDR(__nbytes)					\
	{								\
		.nbytes = __nbytes,					\
	}

/* No data phase */
#define INTEL_SPI_OP_NO_DATA						\
	{								\
		.dir = SPI_MEM_NO_DATA,					\
	}

/* Data-in (read) phase with the given buswidth */
#define INTEL_SPI_OP_DATA_IN(__buswidth)				\
	{								\
		.dir = SPI_MEM_DATA_IN,					\
		.buswidth = __buswidth,					\
	}

/* Data-out (write) phase with the given buswidth */
#define INTEL_SPI_OP_DATA_OUT(__buswidth)				\
	{								\
		.dir = SPI_MEM_DATA_OUT,				\
		.buswidth = __buswidth,					\
	}

/* A supported operation, executed by @__exec_op */
#define INTEL_SPI_MEM_OP(__cmd, __addr, __data, __exec_op)		\
	{								\
		.mem_op = {						\
			.cmd = __cmd,					\
			.addr = __addr,					\
			.data = __data,					\
		},							\
		.exec_op = __exec_op,					\
	}

/* Like INTEL_SPI_MEM_OP() but also sets @replacement_op for the HW sequencer */
#define INTEL_SPI_MEM_OP_REPL(__cmd, __addr, __data, __exec_op, __repl)	\
	{								\
		.mem_op = {						\
			.cmd = __cmd,					\
			.addr = __addr,					\
			.data = __data,					\
		},							\
		.exec_op = __exec_op,					\
		.replacement_op = __repl,				\
	}
    879
    880/*
    881 * The controller handles pretty much everything internally based on the
    882 * SFDP data but we want to make sure we only support the operations
    883 * actually possible. Only check buswidth and transfer direction, the
    884 * core validates data.
    885 */
    886#define INTEL_SPI_GENERIC_OPS						\
    887	/* Status register operations */				\
    888	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),		\
    889			 SPI_MEM_OP_NO_ADDR,				\
    890			 INTEL_SPI_OP_DATA_IN(1),			\
    891			 intel_spi_read_reg),				\
    892	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),		\
    893			 SPI_MEM_OP_NO_ADDR,				\
    894			 INTEL_SPI_OP_DATA_IN(1),			\
    895			 intel_spi_read_reg),				\
    896	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),		\
    897			 SPI_MEM_OP_NO_ADDR,				\
    898			 INTEL_SPI_OP_DATA_OUT(1),			\
    899			 intel_spi_write_reg),				\
    900	/* Normal read */						\
    901	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
    902			 INTEL_SPI_OP_ADDR(3),				\
    903			 INTEL_SPI_OP_DATA_IN(1),			\
    904			 intel_spi_read),				\
    905	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
    906			 INTEL_SPI_OP_ADDR(3),				\
    907			 INTEL_SPI_OP_DATA_IN(2),			\
    908			 intel_spi_read),				\
    909	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
    910			 INTEL_SPI_OP_ADDR(3),				\
    911			 INTEL_SPI_OP_DATA_IN(4),			\
    912			 intel_spi_read),				\
    913	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
    914			 INTEL_SPI_OP_ADDR(4),				\
    915			 INTEL_SPI_OP_DATA_IN(1),			\
    916			 intel_spi_read),				\
    917	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
    918			 INTEL_SPI_OP_ADDR(4),				\
    919			 INTEL_SPI_OP_DATA_IN(2),			\
    920			 intel_spi_read),				\
    921	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
    922			 INTEL_SPI_OP_ADDR(4),				\
    923			 INTEL_SPI_OP_DATA_IN(4),			\
    924			 intel_spi_read),				\
    925	/* Fast read */							\
    926	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
    927			 INTEL_SPI_OP_ADDR(3),				\
    928			 INTEL_SPI_OP_DATA_IN(1),			\
    929			 intel_spi_read),				\
    930	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
    931			 INTEL_SPI_OP_ADDR(3),				\
    932			 INTEL_SPI_OP_DATA_IN(2),			\
    933			 intel_spi_read),				\
    934	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
    935			 INTEL_SPI_OP_ADDR(3),				\
    936			 INTEL_SPI_OP_DATA_IN(4),			\
    937			 intel_spi_read),				\
    938	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
    939			 INTEL_SPI_OP_ADDR(4),				\
    940			 INTEL_SPI_OP_DATA_IN(1),			\
    941			 intel_spi_read),				\
    942	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
    943			 INTEL_SPI_OP_ADDR(4),				\
    944			 INTEL_SPI_OP_DATA_IN(2),			\
    945			 intel_spi_read),				\
    946	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
    947			 INTEL_SPI_OP_ADDR(4),				\
    948			 INTEL_SPI_OP_DATA_IN(4),			\
    949			 intel_spi_read),				\
    950	/* Read with 4-byte address opcode */				\
    951	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1),		\
    952			 INTEL_SPI_OP_ADDR(4),				\
    953			 INTEL_SPI_OP_DATA_IN(1),			\
    954			 intel_spi_read),				\
    955	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1),		\
    956			 INTEL_SPI_OP_ADDR(4),				\
    957			 INTEL_SPI_OP_DATA_IN(2),			\
    958			 intel_spi_read),				\
    959	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1),		\
    960			 INTEL_SPI_OP_ADDR(4),				\
    961			 INTEL_SPI_OP_DATA_IN(4),			\
    962			 intel_spi_read),				\
    963	/* Fast read with 4-byte address opcode */			\
    964	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1),	\
    965			 INTEL_SPI_OP_ADDR(4),				\
    966			 INTEL_SPI_OP_DATA_IN(1),			\
    967			 intel_spi_read),				\
    968	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1),	\
    969			 INTEL_SPI_OP_ADDR(4),				\
    970			 INTEL_SPI_OP_DATA_IN(2),			\
    971			 intel_spi_read),				\
    972	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1),	\
    973			 INTEL_SPI_OP_ADDR(4),				\
    974			 INTEL_SPI_OP_DATA_IN(4),			\
    975			 intel_spi_read),				\
    976	/* Write operations */						\
    977	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1),		\
    978			 INTEL_SPI_OP_ADDR(3),				\
    979			 INTEL_SPI_OP_DATA_OUT(1),			\
    980			 intel_spi_write),				\
    981	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1),		\
    982			 INTEL_SPI_OP_ADDR(4),				\
    983			 INTEL_SPI_OP_DATA_OUT(1),			\
    984			 intel_spi_write),				\
    985	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP_4B, 1),		\
    986			 INTEL_SPI_OP_ADDR(4),				\
    987			 INTEL_SPI_OP_DATA_OUT(1),			\
    988			 intel_spi_write),				\
    989	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),		\
    990			 SPI_MEM_OP_NO_ADDR,				\
    991			 SPI_MEM_OP_NO_DATA,				\
    992			 intel_spi_write_reg),				\
    993	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),		\
    994			 SPI_MEM_OP_NO_ADDR,				\
    995			 SPI_MEM_OP_NO_DATA,				\
    996			 intel_spi_write_reg),				\
    997	/* Erase operations */						\
    998	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1),	\
    999			      INTEL_SPI_OP_ADDR(3),			\
   1000			      SPI_MEM_OP_NO_DATA,			\
   1001			      intel_spi_erase,				\
   1002			      HSFSTS_CTL_FCYCLE_ERASE),			\
   1003	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1),	\
   1004			      INTEL_SPI_OP_ADDR(4),			\
   1005			      SPI_MEM_OP_NO_DATA,			\
   1006			      intel_spi_erase,				\
   1007			      HSFSTS_CTL_FCYCLE_ERASE),			\
   1008	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K_4B, 1),	\
   1009			      INTEL_SPI_OP_ADDR(4),			\
   1010			      SPI_MEM_OP_NO_DATA,			\
   1011			      intel_spi_erase,				\
   1012			      HSFSTS_CTL_FCYCLE_ERASE)			\
   1013
/*
 * Memory operations supported by every controller generation: the common
 * read/write/erase set expanded from INTEL_SPI_GENERIC_OPS. The empty
 * entry at the end terminates the table for the op-lookup code.
 */
static const struct intel_spi_mem_op generic_mem_ops[] = {
	INTEL_SPI_GENERIC_OPS,
	{ },
};
   1018
/*
 * Same as generic_mem_ops plus 64k sector erase, for controllers whose
 * HW sequencer supports the 64k erase cycle (selected in intel_spi_init()
 * when the LVSCC/UVSCC registers advertise a valid 64k erase opcode).
 * The empty entry terminates the table.
 */
static const struct intel_spi_mem_op erase_64k_mem_ops[] = {
	INTEL_SPI_GENERIC_OPS,
	/* 64k sector erase operations */
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
			      INTEL_SPI_OP_ADDR(3),
			      SPI_MEM_OP_NO_DATA,
			      intel_spi_erase,
			      HSFSTS_CTL_FCYCLE_ERASE_64K),
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
			      INTEL_SPI_OP_ADDR(4),
			      SPI_MEM_OP_NO_DATA,
			      intel_spi_erase,
			      HSFSTS_CTL_FCYCLE_ERASE_64K),
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE_4B, 1),
			      INTEL_SPI_OP_ADDR(4),
			      SPI_MEM_OP_NO_DATA,
			      intel_spi_erase,
			      HSFSTS_CTL_FCYCLE_ERASE_64K),
	{ },
};
   1039
/*
 * intel_spi_init() - One-time controller setup during probe
 * @ispi: The controller instance; @ispi->base and @ispi->info must be set
 *
 * Resolves per-generation register offsets, optionally lifts write
 * protection, decides between HW and SW sequencer for register and erase
 * operations, reads back the BIOS-locked opcode menu (if locked), and
 * selects the memory-operation table.
 *
 * Returns 0 on success, -EINVAL on unknown controller type or when the
 * SW sequencer is required but not present.
 */
static int intel_spi_init(struct intel_spi *ispi)
{
	u32 opmenu0, opmenu1, lvscc, uvscc, val;
	bool erase_64k = false;
	int i;

	/* Per-generation register layout and sequencer capabilities */
	switch (ispi->info->type) {
	case INTEL_SPI_BYT:
		ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
		ispi->pregs = ispi->base + BYT_PR;
		ispi->nregions = BYT_FREG_NUM;
		ispi->pr_num = BYT_PR_NUM;
		ispi->swseq_reg = true;
		break;

	case INTEL_SPI_LPT:
		ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
		ispi->pregs = ispi->base + LPT_PR;
		ispi->nregions = LPT_FREG_NUM;
		ispi->pr_num = LPT_PR_NUM;
		ispi->swseq_reg = true;
		break;

	case INTEL_SPI_BXT:
		ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
		ispi->pregs = ispi->base + BXT_PR;
		ispi->nregions = BXT_FREG_NUM;
		ispi->pr_num = BXT_PR_NUM;
		erase_64k = true;
		break;

	case INTEL_SPI_CNL:
		/* CNL has no SW sequencer at all */
		ispi->sregs = NULL;
		ispi->pregs = ispi->base + CNL_PR;
		ispi->nregions = CNL_FREG_NUM;
		ispi->pr_num = CNL_PR_NUM;
		break;

	default:
		return -EINVAL;
	}

	/* Try to disable write protection if user asked to do so */
	if (writeable && !intel_spi_set_writeable(ispi)) {
		dev_warn(ispi->dev, "can't disable chip write protection\n");
		writeable = false;
	}

	/* Disable #SMI generation from HW sequencer */
	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~HSFSTS_CTL_FSMIE;
	writel(val, ispi->base + HSFSTS_CTL);

	/*
	 * Determine whether erase operation should use HW or SW sequencer.
	 *
	 * The HW sequencer has a predefined list of opcodes, with only the
	 * erase opcode being programmable in LVSCC and UVSCC registers.
	 * If these registers don't contain a valid erase opcode, erase
	 * cannot be done using HW sequencer.
	 */
	lvscc = readl(ispi->base + LVSCC);
	uvscc = readl(ispi->base + UVSCC);
	if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
		ispi->swseq_erase = true;
	/* SPI controller on Intel BXT supports 64K erase opcode */
	if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
		if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
		    !(uvscc & ERASE_64K_OPCODE_MASK))
			erase_64k = false;

	/* SW sequencer is needed but this generation has none (e.g. CNL) */
	if (!ispi->sregs && (ispi->swseq_reg || ispi->swseq_erase)) {
		dev_err(ispi->dev, "software sequencer not supported, but required\n");
		return -EINVAL;
	}

	/*
	 * Some controllers can only do basic operations using hardware
	 * sequencer. All other operations are supposed to be carried out
	 * using software sequencer.
	 */
	if (ispi->swseq_reg) {
		/* Disable #SMI generation from SW sequencer */
		val = readl(ispi->sregs + SSFSTS_CTL);
		val &= ~SSFSTS_CTL_FSMIE;
		writel(val, ispi->sregs + SSFSTS_CTL);
	}

	/* Check controller's lock status */
	val = readl(ispi->base + HSFSTS_CTL);
	ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);

	if (ispi->locked && ispi->sregs) {
		/*
		 * BIOS programs allowed opcodes and then locks down the
		 * register. So read back what opcodes it decided to support.
		 * That's the set we are going to support as well.
		 */
		opmenu0 = readl(ispi->sregs + OPMENU0);
		opmenu1 = readl(ispi->sregs + OPMENU1);

		if (opmenu0 && opmenu1) {
			/* Each 32-bit OPMENU register packs four 8-bit opcodes */
			for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
				ispi->opcodes[i] = opmenu0 >> i * 8;
				ispi->opcodes[i + 4] = opmenu1 >> i * 8;
			}
		}
	}

	if (erase_64k) {
		dev_dbg(ispi->dev, "Using erase_64k memory operations");
		ispi->mem_ops = erase_64k_mem_ops;
	} else {
		dev_dbg(ispi->dev, "Using generic memory operations");
		ispi->mem_ops = generic_mem_ops;
	}

	intel_spi_dump_regs(ispi);
	return 0;
}
   1160
   1161static bool intel_spi_is_protected(const struct intel_spi *ispi,
   1162				   unsigned int base, unsigned int limit)
   1163{
   1164	int i;
   1165
   1166	for (i = 0; i < ispi->pr_num; i++) {
   1167		u32 pr_base, pr_limit, pr_value;
   1168
   1169		pr_value = readl(ispi->pregs + PR(i));
   1170		if (!(pr_value & (PR_WPE | PR_RPE)))
   1171			continue;
   1172
   1173		pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
   1174		pr_base = pr_value & PR_BASE_MASK;
   1175
   1176		if (pr_base >= base && pr_limit <= limit)
   1177			return true;
   1178	}
   1179
   1180	return false;
   1181}
   1182
   1183/*
   1184 * There will be a single partition holding all enabled flash regions. We
   1185 * call this "BIOS".
   1186 */
   1187static void intel_spi_fill_partition(struct intel_spi *ispi,
   1188				     struct mtd_partition *part)
   1189{
   1190	u64 end;
   1191	int i;
   1192
   1193	memset(part, 0, sizeof(*part));
   1194
   1195	/* Start from the mandatory descriptor region */
   1196	part->size = 4096;
   1197	part->name = "BIOS";
   1198
   1199	/*
   1200	 * Now try to find where this partition ends based on the flash
   1201	 * region registers.
   1202	 */
   1203	for (i = 1; i < ispi->nregions; i++) {
   1204		u32 region, base, limit;
   1205
   1206		region = readl(ispi->base + FREG(i));
   1207		base = region & FREG_BASE_MASK;
   1208		limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
   1209
   1210		if (base >= limit || limit == 0)
   1211			continue;
   1212
   1213		/*
   1214		 * If any of the regions have protection bits set, make the
   1215		 * whole partition read-only to be on the safe side.
   1216		 *
   1217		 * Also if the user did not ask the chip to be writeable
   1218		 * mask the bit too.
   1219		 */
   1220		if (!writeable || intel_spi_is_protected(ispi, base, limit))
   1221			part->mask_flags |= MTD_WRITEABLE;
   1222
   1223		end = (limit << 12) + 4096;
   1224		if (end > part->size)
   1225			part->size = end;
   1226	}
   1227}
   1228
   1229static int intel_spi_populate_chip(struct intel_spi *ispi)
   1230{
   1231	struct flash_platform_data *pdata;
   1232	struct spi_board_info chip;
   1233
   1234	pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL);
   1235	if (!pdata)
   1236		return -ENOMEM;
   1237
   1238	pdata->nr_parts = 1;
   1239	pdata->parts = devm_kcalloc(ispi->dev, sizeof(*pdata->parts),
   1240				    pdata->nr_parts, GFP_KERNEL);
   1241	if (!pdata->parts)
   1242		return -ENOMEM;
   1243
   1244	intel_spi_fill_partition(ispi, pdata->parts);
   1245
   1246	memset(&chip, 0, sizeof(chip));
   1247	snprintf(chip.modalias, 8, "spi-nor");
   1248	chip.platform_data = pdata;
   1249
   1250	return spi_new_device(ispi->master, &chip) ? 0 : -ENODEV;
   1251}
   1252
   1253/**
   1254 * intel_spi_probe() - Probe the Intel SPI flash controller
   1255 * @dev: Pointer to the parent device
   1256 * @mem: MMIO resource
   1257 * @info: Platform specific information
   1258 *
   1259 * Probes Intel SPI flash controller and creates the flash chip device.
   1260 * Returns %0 on success and negative errno in case of failure.
   1261 */
   1262int intel_spi_probe(struct device *dev, struct resource *mem,
   1263		    const struct intel_spi_boardinfo *info)
   1264{
   1265	struct spi_controller *master;
   1266	struct intel_spi *ispi;
   1267	int ret;
   1268
   1269	master = devm_spi_alloc_master(dev, sizeof(*ispi));
   1270	if (!master)
   1271		return -ENOMEM;
   1272
   1273	master->mem_ops = &intel_spi_mem_ops;
   1274
   1275	ispi = spi_master_get_devdata(master);
   1276
   1277	ispi->base = devm_ioremap_resource(dev, mem);
   1278	if (IS_ERR(ispi->base))
   1279		return PTR_ERR(ispi->base);
   1280
   1281	ispi->dev = dev;
   1282	ispi->master = master;
   1283	ispi->info = info;
   1284
   1285	ret = intel_spi_init(ispi);
   1286	if (ret)
   1287		return ret;
   1288
   1289	ret = devm_spi_register_master(dev, master);
   1290	if (ret)
   1291		return ret;
   1292
   1293	return intel_spi_populate_chip(ispi);
   1294}
   1295EXPORT_SYMBOL_GPL(intel_spi_probe);
   1296
/* Module metadata exposed via modinfo */
MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_LICENSE("GPL v2");