cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

spi-cadence-quadspi.c


// SPDX-License-Identifier: GPL-2.0-only
//
// Driver for Cadence QSPI Controller
//
// Copyright Altera Corporation (C) 2012-2014. All rights reserved.
// Copyright Intel Corporation (C) 2019-2020. All rights reserved.
// Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/timer.h>

#define CQSPI_NAME			"cadence-qspi"
#define CQSPI_MAX_CHIPSELECT		16

/* Quirks */
#define CQSPI_NEEDS_WR_DELAY		BIT(0)
#define CQSPI_DISABLE_DAC_MODE		BIT(1)
#define CQSPI_SUPPORT_EXTERNAL_DMA	BIT(2)
#define CQSPI_NO_SUPPORT_WR_COMPLETION	BIT(3)

/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL		BIT(0)

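/*
 * Encode the bus width of an op phase as the 2-bit transfer type used in
 * the RD/WR instruction registers: 1, 2, 4 and 8 lanes map to 0, 1, 2
 * and 3. A phase that transfers no bytes is encoded as single-lane (0).
 */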
#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)

struct cqspi_st;

struct cqspi_flash_pdata {
	struct cqspi_st	*cqspi;
	u32		clk_rate;
	u32		read_delay;
	u32		tshsl_ns;
	u32		tsd2d_ns;
	u32		tchsh_ns;
	u32		tslch_ns;
	u8		cs;
};

struct cqspi_st {
	struct platform_device	*pdev;
	struct spi_master	*master;
	struct clk		*clk;
	unsigned int		sclk;

	void __iomem		*iobase;
	void __iomem		*ahb_base;
	resource_size_t		ahb_size;
	struct completion	transfer_complete;

	struct dma_chan		*rx_chan;
	struct completion	rx_dma_complete;
	dma_addr_t		mmap_phys_base;

	int			current_cs;
	unsigned long		master_ref_clk_hz;
	bool			is_decoded_cs;
	u32			fifo_depth;
	u32			fifo_width;
	u32			num_chipselect;
	bool			rclk_en;
	u32			trigger_address;
	u32			wr_delay;
	bool			use_direct_mode;
	struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
	bool			use_dma_read;
	u32			pd_dev_id;
	bool			wr_completion;
};

struct cqspi_driver_platdata {
	u32 hwcaps_mask;
	u8 quirks;
	int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
				 u_char *rxbuf, loff_t from_addr, size_t n_rx);
	u32 (*get_dma_status)(struct cqspi_st *cqspi);
};

/* Operation timeout value */
#define CQSPI_TIMEOUT_MS			500
#define CQSPI_READ_TIMEOUT_MS			10

#define CQSPI_DUMMY_CLKS_PER_BYTE		8
#define CQSPI_DUMMY_BYTES_MAX			4
#define CQSPI_DUMMY_CLKS_MAX			31

#define CQSPI_STIG_DATA_LEN_MAX			8

/* Register map */
#define CQSPI_REG_CONFIG			0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK		BIT(0)
#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL	BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK		BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB		10
#define CQSPI_REG_CONFIG_DMA_MASK		BIT(15)
#define CQSPI_REG_CONFIG_BAUD_LSB		19
#define CQSPI_REG_CONFIG_DTR_PROTO		BIT(24)
#define CQSPI_REG_CONFIG_DUAL_OPCODE		BIT(30)
#define CQSPI_REG_CONFIG_IDLE_LSB		31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK	0xF
#define CQSPI_REG_CONFIG_BAUD_MASK		0xF

#define CQSPI_REG_RD_INSTR			0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB		0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB	8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB	16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB		20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB		24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK	0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK		0x1F

#define CQSPI_REG_WR_INSTR			0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB		0
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB	16

#define CQSPI_REG_DELAY				0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB		0
#define CQSPI_REG_DELAY_TCHSH_LSB		8
#define CQSPI_REG_DELAY_TSD2D_LSB		16
#define CQSPI_REG_DELAY_TSHSL_LSB		24
#define CQSPI_REG_DELAY_TSLCH_MASK		0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK		0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK		0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK		0xFF

#define CQSPI_REG_READCAPTURE			0x10
#define CQSPI_REG_READCAPTURE_BYPASS_LSB	0
#define CQSPI_REG_READCAPTURE_DELAY_LSB		1
#define CQSPI_REG_READCAPTURE_DELAY_MASK	0xF

#define CQSPI_REG_SIZE				0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB		0
#define CQSPI_REG_SIZE_PAGE_LSB			4
#define CQSPI_REG_SIZE_BLOCK_LSB		16
#define CQSPI_REG_SIZE_ADDRESS_MASK		0xF
#define CQSPI_REG_SIZE_PAGE_MASK		0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK		0x3F

#define CQSPI_REG_SRAMPARTITION			0x18
#define CQSPI_REG_INDIRECTTRIGGER		0x1C

#define CQSPI_REG_DMA				0x20
#define CQSPI_REG_DMA_SINGLE_LSB		0
#define CQSPI_REG_DMA_BURST_LSB			8
#define CQSPI_REG_DMA_SINGLE_MASK		0xFF
#define CQSPI_REG_DMA_BURST_MASK		0xFF

#define CQSPI_REG_REMAP				0x24
#define CQSPI_REG_MODE_BIT			0x28

#define CQSPI_REG_SDRAMLEVEL			0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB		0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB		16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK		0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK		0xFFFF

#define CQSPI_REG_WR_COMPLETION_CTRL		0x38
#define CQSPI_REG_WR_DISABLE_AUTO_POLL		BIT(14)

#define CQSPI_REG_IRQSTATUS			0x40
#define CQSPI_REG_IRQMASK			0x44

#define CQSPI_REG_INDIRECTRD			0x60
#define CQSPI_REG_INDIRECTRD_START_MASK		BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTRD_DONE_MASK		BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK		0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR		0x68
#define CQSPI_REG_INDIRECTRDBYTES		0x6C

#define CQSPI_REG_CMDCTRL			0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK		BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK	BIT(1)
#define CQSPI_REG_CMDCTRL_DUMMY_LSB		7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB		12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB		15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB		16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB		19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB		20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB		23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB		24
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK	0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_DUMMY_MASK		0x1F

#define CQSPI_REG_INDIRECTWR			0x70
#define CQSPI_REG_INDIRECTWR_START_MASK		BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTWR_DONE_MASK		BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK		0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR		0x78
#define CQSPI_REG_INDIRECTWRBYTES		0x7C

#define CQSPI_REG_INDTRIG_ADDRRANGE		0x80

#define CQSPI_REG_CMDADDRESS			0x94
#define CQSPI_REG_CMDREADDATALOWER		0xA0
#define CQSPI_REG_CMDREADDATAUPPER		0xA4
#define CQSPI_REG_CMDWRITEDATALOWER		0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER		0xAC

#define CQSPI_REG_POLLING_STATUS		0xB0
#define CQSPI_REG_POLLING_STATUS_DUMMY_LSB	16

#define CQSPI_REG_OP_EXT_LOWER			0xE0
#define CQSPI_REG_OP_EXT_READ_LSB		24
#define CQSPI_REG_OP_EXT_WRITE_LSB		16
#define CQSPI_REG_OP_EXT_STIG_LSB		0

#define CQSPI_REG_VERSAL_DMA_SRC_ADDR		0x1000

#define CQSPI_REG_VERSAL_DMA_DST_ADDR		0x1800
#define CQSPI_REG_VERSAL_DMA_DST_SIZE		0x1804

#define CQSPI_REG_VERSAL_DMA_DST_CTRL		0x180C

#define CQSPI_REG_VERSAL_DMA_DST_I_STS		0x1814
#define CQSPI_REG_VERSAL_DMA_DST_I_EN		0x1818
#define CQSPI_REG_VERSAL_DMA_DST_I_DIS		0x181C
#define CQSPI_REG_VERSAL_DMA_DST_DONE_MASK	BIT(1)

#define CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB	0x1828

#define CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL	0xF43FFA00
#define CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL	0x6

/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR			BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW			BIT(1)
#define CQSPI_REG_IRQ_IND_COMP			BIT(2)
#define CQSPI_REG_IRQ_IND_RD_REJECT		BIT(3)
#define CQSPI_REG_IRQ_WR_PROTECTED_ERR		BIT(4)
#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR		BIT(5)
#define CQSPI_REG_IRQ_WATERMARK			BIT(6)
#define CQSPI_REG_IRQ_IND_SRAM_FULL		BIT(12)

#define CQSPI_IRQ_MASK_RD		(CQSPI_REG_IRQ_WATERMARK	| \
					 CQSPI_REG_IRQ_IND_SRAM_FULL	| \
					 CQSPI_REG_IRQ_IND_COMP)

#define CQSPI_IRQ_MASK_WR		(CQSPI_REG_IRQ_IND_COMP		| \
					 CQSPI_REG_IRQ_WATERMARK	| \
					 CQSPI_REG_IRQ_UNDERFLOW)

#define CQSPI_IRQ_STATUS_MASK		0x1FFFF
#define CQSPI_DMA_UNALIGN		0x3

#define CQSPI_REG_VERSAL_DMA_VAL		0x602

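/*
 * Poll @reg until all bits in @mask are set (or, when @clr is true,
 * cleared), giving up after CQSPI_TIMEOUT_MS.
 */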
static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr)
{
	u32 val;

	return readl_relaxed_poll_timeout(reg, val,
					  (((clr ? ~val : val) & mask) == mask),
					  10, CQSPI_TIMEOUT_MS * 1000);
}

static bool cqspi_is_idle(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);

	return reg & (1UL << CQSPI_REG_CONFIG_IDLE_LSB);
}

static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);

	reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
	return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}

static u32 cqspi_get_versal_dma_status(struct cqspi_st *cqspi)
{
	u32 dma_status;

	dma_status = readl(cqspi->iobase +
			   CQSPI_REG_VERSAL_DMA_DST_I_STS);
	writel(dma_status, cqspi->iobase +
	       CQSPI_REG_VERSAL_DMA_DST_I_STS);

	return dma_status & CQSPI_REG_VERSAL_DMA_DST_DONE_MASK;
}

static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
{
	struct cqspi_st *cqspi = dev;
	unsigned int irq_status;
	struct device *device = &cqspi->pdev->dev;
	const struct cqspi_driver_platdata *ddata;

	ddata = of_device_get_match_data(device);

	/* Read interrupt status */
	irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);

	/* Clear interrupt */
	writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);

	if (cqspi->use_dma_read && ddata && ddata->get_dma_status) {
		if (ddata->get_dma_status(cqspi)) {
			complete(&cqspi->transfer_complete);
			return IRQ_HANDLED;
		}
	}

	irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;

	if (irq_status)
		complete(&cqspi->transfer_complete);

	return IRQ_HANDLED;
}

static unsigned int cqspi_calc_rdreg(const struct spi_mem_op *op)
{
	u32 rdreg = 0;

	rdreg |= CQSPI_OP_WIDTH(op->cmd) << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
	rdreg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
	rdreg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;

	return rdreg;
}

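/*
 * Convert the dummy phase from bytes to clock cycles: each byte takes
 * 8 / buswidth cycles, and DTR ops clock data on both edges, so they
 * need half as many cycles.
 */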
static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op)
{
	unsigned int dummy_clk;

	if (!op->dummy.nbytes)
		return 0;

	dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
	if (op->cmd.dtr)
		dummy_clk /= 2;

	return dummy_clk;
}

static int cqspi_wait_idle(struct cqspi_st *cqspi)
{
	const unsigned int poll_idle_retry = 3;
	unsigned int count = 0;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
	while (1) {
		/*
		 * Read a few times in succession to ensure the controller
		 * is indeed idle, that is, the bit does not transition
		 * low again.
		 */
		if (cqspi_is_idle(cqspi))
			count++;
		else
			count = 0;

		if (count >= poll_idle_retry)
			return 0;

		if (time_after(jiffies, timeout)) {
			/* Timeout, in busy mode. */
			dev_err(&cqspi->pdev->dev,
				"QSPI is still busy after %dms timeout.\n",
				CQSPI_TIMEOUT_MS);
			return -ETIMEDOUT;
		}

		cpu_relax();
	}
}

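/*
 * Execute a Software Triggered Instruction Generator (STIG) command:
 * write the fully formed CMDCTRL value, set the execute bit, then wait
 * for the in-progress bit to clear and for the controller to go idle.
 */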
static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
{
	void __iomem *reg_base = cqspi->iobase;
	int ret;

	/* Write the CMDCTRL without starting execution. */
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
	/* Start execution. */
	reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);

	/* Polling for completion. */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
				 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
	if (ret) {
		dev_err(&cqspi->pdev->dev,
			"Flash command execution timed out.\n");
		return ret;
	}

	/* Polling QSPI idle status. */
	return cqspi_wait_idle(cqspi);
}

static int cqspi_setup_opcode_ext(struct cqspi_flash_pdata *f_pdata,
				  const struct spi_mem_op *op,
				  unsigned int shift)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	u8 ext;

	if (op->cmd.nbytes != 2)
		return -EINVAL;

	/* Opcode extension is the LSB. */
	ext = op->cmd.opcode & 0xff;

	reg = readl(reg_base + CQSPI_REG_OP_EXT_LOWER);
	reg &= ~(0xff << shift);
	reg |= ext << shift;
	writel(reg, reg_base + CQSPI_REG_OP_EXT_LOWER);

	return 0;
}

static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata,
			    const struct spi_mem_op *op, unsigned int shift)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	int ret;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	/*
	 * We enable dual byte opcode here. The callers have to set up the
	 * extension opcode based on which type of operation it is.
	 */
	if (op->cmd.dtr) {
		reg |= CQSPI_REG_CONFIG_DTR_PROTO;
		reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;

		/* Set up command opcode extension. */
		ret = cqspi_setup_opcode_ext(f_pdata, op, shift);
		if (ret)
			return ret;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DTR_PROTO;
		reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE;
	}

	writel(reg, reg_base + CQSPI_REG_CONFIG);

	return cqspi_wait_idle(cqspi);
}

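/*
 * STIG read: execute the opcode and fetch up to CQSPI_STIG_DATA_LEN_MAX
 * (8) bytes of response via the CMDREADDATA LOWER/UPPER registers.
 */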
static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
			      const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 *rxbuf = op->data.buf.in;
	u8 opcode;
	size_t n_rx = op->data.nbytes;
	unsigned int rdreg;
	unsigned int reg;
	unsigned int dummy_clk;
	size_t read_len;
	int status;

	status = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
	if (status)
		return status;

	if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
		dev_err(&cqspi->pdev->dev,
			"Invalid input argument, len %zu rxbuf 0x%p\n",
			n_rx, rxbuf);
		return -EINVAL;
	}

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	rdreg = cqspi_calc_rdreg(op);
	writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);

	dummy_clk = cqspi_calc_dummy(op);
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -EOPNOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
		     << CQSPI_REG_CMDCTRL_DUMMY_LSB;

	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);

	/* 0 means 1 byte. */
	reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
	status = cqspi_exec_flash_cmd(cqspi, reg);
	if (status)
		return status;

	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (n_rx > 4) ? 4 : n_rx;
	memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	if (n_rx > 4) {
		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = n_rx - read_len;
		memcpy(rxbuf, &reg, read_len);
	}

	return 0;
}

static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
			       const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 opcode;
	const u8 *txbuf = op->data.buf.out;
	size_t n_tx = op->data.nbytes;
	unsigned int reg;
	unsigned int data;
	size_t write_len;
	int ret;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
	if (ret)
		return ret;

	if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
		dev_err(&cqspi->pdev->dev,
			"Invalid input argument, cmdlen %zu txbuf 0x%p\n",
			n_tx, txbuf);
		return -EINVAL;
	}

	reg = cqspi_calc_rdreg(op);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	if (op->addr.nbytes) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
		reg |= ((op->addr.nbytes - 1) &
			CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;

		writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
	}

	if (n_tx) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
		reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
		data = 0;
		write_len = (n_tx > 4) ? 4 : n_tx;
		memcpy(&data, txbuf, write_len);
		txbuf += write_len;
		writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);

		if (n_tx > 4) {
			data = 0;
			write_len = n_tx - 4;
			memcpy(&data, txbuf, write_len);
			writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
		}
	}

	return cqspi_exec_flash_cmd(cqspi, reg);
}

static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
			    const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int dummy_clk = 0;
	unsigned int reg;
	int ret;
	u8 opcode;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_READ_LSB);
	if (ret)
		return ret;

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
	reg |= cqspi_calc_rdreg(op);

	/* Setup dummy clock cycles */
	dummy_clk = cqspi_calc_dummy(op);

	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -EOPNOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
		       << CQSPI_REG_RD_INSTR_DUMMY_LSB;

	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/* Set address width */
	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	return 0;
}

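/*
 * Indirect read: the controller fetches flash data into its SRAM FIFO
 * and the CPU drains it through the AHB trigger window, whole words
 * first and any sub-word remainder last, paced by the watermark, SRAM
 * full and completion interrupts.
 */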
static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
				       u8 *rxbuf, loff_t from_addr,
				       const size_t n_rx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	void __iomem *ahb_base = cqspi->ahb_base;
	unsigned int remaining = n_rx;
	unsigned int mod_bytes = n_rx % 4;
	unsigned int bytes_to_read = 0;
	u8 *rxbuf_end = rxbuf + n_rx;
	int ret = 0;

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	while (remaining > 0) {
		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
			ret = -ETIMEDOUT;

		bytes_to_read = cqspi_get_rd_sram_level(cqspi);

		if (ret && bytes_to_read == 0) {
			dev_err(dev, "Indirect read timeout, no bytes\n");
			goto failrd;
		}

		while (bytes_to_read != 0) {
			unsigned int word_remain = round_down(remaining, 4);

			bytes_to_read *= cqspi->fifo_width;
			bytes_to_read = bytes_to_read > remaining ?
					remaining : bytes_to_read;
			bytes_to_read = round_down(bytes_to_read, 4);
			/* Read 4 byte word chunks then single bytes */
			if (bytes_to_read) {
				ioread32_rep(ahb_base, rxbuf,
					     (bytes_to_read / 4));
			} else if (!word_remain && mod_bytes) {
				unsigned int temp = ioread32(ahb_base);

				bytes_to_read = mod_bytes;
				memcpy(rxbuf, &temp, min((unsigned int)
							 (rxbuf_end - rxbuf),
							 bytes_to_read));
			}
			rxbuf += bytes_to_read;
			remaining -= bytes_to_read;
			bytes_to_read = cqspi_get_rd_sram_level(cqspi);
		}

		if (remaining > 0)
			reinit_completion(&cqspi->transfer_complete);
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
				 CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
	if (ret) {
		dev_err(dev, "Indirect read completion error (%i)\n", ret);
		goto failrd;
	}

	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);

	return 0;

failrd:
	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);
	return ret;
}

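/*
 * Versal OSPI read: move the 4-byte-aligned bulk of the request with the
 * controller's built-in DMA engine, then pick up the remaining 1-3 bytes
 * with a plain indirect read.
 */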
static int cqspi_versal_indirect_read_dma(struct cqspi_flash_pdata *f_pdata,
					  u_char *rxbuf, loff_t from_addr,
					  size_t n_rx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, bytes_to_dma;
	loff_t addr = from_addr;
	void *buf = rxbuf;
	dma_addr_t dma_addr;
	u8 bytes_rem;
	int ret = 0;

	bytes_rem = n_rx % 4;
	bytes_to_dma = (n_rx - bytes_rem);

	if (!bytes_to_dma)
		goto nondmard;

	ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_DMA);
	if (ret)
		return ret;

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	dma_addr = dma_map_single(dev, rxbuf, bytes_to_dma, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma mapping failed\n");
		return -ENOMEM;
	}

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES);
	writel(CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL,
	       reg_base + CQSPI_REG_INDTRIG_ADDRRANGE);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	/* Enable DMA done interrupt */
	writel(CQSPI_REG_VERSAL_DMA_DST_DONE_MASK,
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_I_EN);

	/* Default DMA periph configuration */
	writel(CQSPI_REG_VERSAL_DMA_VAL, reg_base + CQSPI_REG_DMA);

	/* Configure DMA Dst address */
	writel(lower_32_bits(dma_addr),
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR);
	writel(upper_32_bits(dma_addr),
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB);

	/* Configure DMA Src address */
	writel(cqspi->trigger_address, reg_base +
	       CQSPI_REG_VERSAL_DMA_SRC_ADDR);

	/* Set DMA destination size */
	writel(bytes_to_dma, reg_base + CQSPI_REG_VERSAL_DMA_DST_SIZE);

	/* Set DMA destination control */
	writel(CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL,
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_CTRL);

	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	reinit_completion(&cqspi->transfer_complete);

	if (!wait_for_completion_timeout(&cqspi->transfer_complete,
					 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) {
		ret = -ETIMEDOUT;
		goto failrd;
	}

	/* Disable DMA interrupt */
	writel(0x0, cqspi->iobase + CQSPI_REG_VERSAL_DMA_DST_I_DIS);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
	       cqspi->iobase + CQSPI_REG_INDIRECTRD);
	dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id,
					PM_OSPI_MUX_SEL_LINEAR);
	if (ret)
		return ret;

nondmard:
	if (bytes_rem) {
		addr += bytes_to_dma;
		buf += bytes_to_dma;
		ret = cqspi_indirect_read_execute(f_pdata, buf, addr,
						  bytes_rem);
		if (ret)
			return ret;
	}

	return 0;

failrd:
	/* Disable DMA interrupt */
	writel(0x0, reg_base + CQSPI_REG_VERSAL_DMA_DST_I_DIS);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_LINEAR);

	return ret;
}

static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
			     const struct spi_mem_op *op)
{
	unsigned int reg;
	int ret;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 opcode;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_WRITE_LSB);
	if (ret)
		return ret;

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	/* Set opcode. */
	reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	reg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
	reg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
	writel(reg, reg_base + CQSPI_REG_WR_INSTR);
	reg = cqspi_calc_rdreg(op);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/*
	 * SPI NAND flashes require the address of the status register to be
	 * passed in the Read SR command. Also, some SPI NOR flashes like the
	 * Cypress Semper flash expect a 4-byte dummy address in the Read SR
	 * command in DTR mode.
	 *
	 * But this controller does not support an address phase in the Read
	 * SR command when doing auto-HW polling. So, disable write completion
	 * polling on the controller's side. spinand and spi-nor will take
	 * care of polling the status register.
	 */
	if (cqspi->wr_completion) {
		reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
		reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
		writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
	}

	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	return 0;
}

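/*
 * Indirect write: the CPU pushes data through the AHB trigger window
 * into the controller's SRAM FIFO, whole words first and any sub-word
 * remainder last, then waits for the controller to drain the FIFO to
 * the flash.
 */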
static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
					loff_t to_addr, const u8 *txbuf,
					const size_t n_tx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int remaining = n_tx;
	unsigned int write_bytes;
	int ret;

	writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTWR_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	/*
	 * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
	 * Controller programming sequence, a couple of QSPI_REF_CLK cycles
	 * of delay are required for the above bit to be internally
	 * synchronized by the QSPI module. Provide 5 cycles of delay.
	 */
	if (cqspi->wr_delay)
		ndelay(cqspi->wr_delay);

	while (remaining > 0) {
		size_t write_words, mod_bytes;

		write_bytes = remaining;
		write_words = write_bytes / 4;
		mod_bytes = write_bytes % 4;
		/* Write 4 bytes at a time then single bytes. */
		if (write_words) {
			iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
			txbuf += (write_words * 4);
		}
		if (mod_bytes) {
			unsigned int temp = 0xFFFFFFFF;

			memcpy(&temp, txbuf, mod_bytes);
			iowrite32(temp, cqspi->ahb_base);
			txbuf += mod_bytes;
		}

		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
			dev_err(dev, "Indirect write timeout\n");
			ret = -ETIMEDOUT;
			goto failwr;
		}

		remaining -= write_bytes;

		if (remaining > 0)
			reinit_completion(&cqspi->transfer_complete);
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
				 CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
	if (ret) {
		dev_err(dev, "Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);

	cqspi_wait_idle(cqspi);

	return 0;

failwr:
	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	return ret;
}

static void cqspi_chipselect(struct cqspi_flash_pdata *f_pdata)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int chip_select = f_pdata->cs;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	if (cqspi->is_decoded_cs) {
		reg |= CQSPI_REG_CONFIG_DECODE_MASK;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;

		/*
		 * Convert CS if no decoder is used.
		 * CS0 to 4b'1110
		 * CS1 to 4b'1101
		 * CS2 to 4b'1011
		 * CS3 to 4b'0111
		 */
		chip_select = 0xF & ~(1 << chip_select);
	}

	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
		 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
	    << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

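/*
 * Convert a delay in nanoseconds to reference clock ticks, rounding up:
 * ticks = DIV_ROUND_UP(ref_clk_kHz * ns_val, 1000000).
 */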
static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
					   const unsigned int ns_val)
{
	unsigned int ticks;

	ticks = ref_clk_hz / 1000;	/* kHz */
	ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);

	return ticks;
}

static void cqspi_delay(struct cqspi_flash_pdata *f_pdata)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *iobase = cqspi->iobase;
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	unsigned int tshsl, tchsh, tslch, tsd2d;
	unsigned int reg;
	unsigned int tsclk;

	/* calculate the number of ref ticks for one sclk tick */
	tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);

	tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
	/* this particular value must be at least one sclk */
	if (tshsl < tsclk)
		tshsl = tsclk;

	tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
	tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
	tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);

	reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
	       << CQSPI_REG_DELAY_TSHSL_LSB;
	reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
		<< CQSPI_REG_DELAY_TCHSH_LSB;
	reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
		<< CQSPI_REG_DELAY_TSLCH_LSB;
	reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
		<< CQSPI_REG_DELAY_TSD2D_LSB;
	writel(reg, iobase + CQSPI_REG_DELAY);
}

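/*
 * Program the baud divider so that sclk = ref_clk_hz / (2 * (div + 1)),
 * i.e. the highest rate that does not exceed the requested sclk.
 */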
static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
{
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, div;

	/* Recalculate the baudrate divisor based on QSPI specification. */
	div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
	reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

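/*
 * Configure read data capture: optionally bypass the loopback clock and
 * set the capture delay in ref clock cycles to compensate for round-trip
 * latency at higher clock rates.
 */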
static void cqspi_readdata_capture(struct cqspi_st *cqspi,
				   const bool bypass,
				   const unsigned int delay)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_READCAPTURE);

	if (bypass)
		reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
	else
		reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);

	reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
		 << CQSPI_REG_READCAPTURE_DELAY_LSB);

	reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
		<< CQSPI_REG_READCAPTURE_DELAY_LSB;

	writel(reg, reg_base + CQSPI_REG_READCAPTURE);
}

static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	if (enable)
		reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
	else
		reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;

	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static void cqspi_configure(struct cqspi_flash_pdata *f_pdata,
			    unsigned long sclk)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	int switch_cs = (cqspi->current_cs != f_pdata->cs);
	int switch_ck = (cqspi->sclk != sclk);

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 0);

	/* Switch chip select. */
	if (switch_cs) {
		cqspi->current_cs = f_pdata->cs;
		cqspi_chipselect(f_pdata);
	}

	/* Setup baudrate divisor and delays */
	if (switch_ck) {
		cqspi->sclk = sclk;
		cqspi_config_baudrate_div(cqspi);
		cqspi_delay(f_pdata);
		cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
				       f_pdata->read_delay);
	}

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 1);
}

static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
			   const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	loff_t to = op->addr.val;
	size_t len = op->data.nbytes;
	const u_char *buf = op->data.buf.out;
	int ret;

	ret = cqspi_write_setup(f_pdata, op);
	if (ret)
		return ret;

	/*
	 * Some flashes like the Cypress Semper flash expect a dummy 4-byte
	 * address (all 0s) with the read status register command in DTR mode.
	 * But this controller does not support sending dummy address bytes to
	 * the flash when it is polling the write completion register in DTR
	 * mode. So, we can not use direct mode when in DTR mode for writing
	 * data.
	 */
	if (!op->cmd.dtr && cqspi->use_direct_mode &&
	    ((to + len) <= cqspi->ahb_size)) {
		memcpy_toio(cqspi->ahb_base + to, buf, len);
		return cqspi_wait_idle(cqspi);
	}

	return cqspi_indirect_write_execute(f_pdata, to, buf, len);
}

static void cqspi_rx_dma_callback(void *param)
{
	struct cqspi_st *cqspi = param;

	complete(&cqspi->rx_dma_complete);
}

static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
				     u_char *buf, loff_t from, size_t len)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
	int ret = 0;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	dma_addr_t dma_dst;
	struct device *ddev;

	if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
		memcpy_fromio(buf, cqspi->ahb_base + from, len);
		return 0;
	}

	ddev = cqspi->rx_chan->device->dev;
	dma_dst = dma_map_single(ddev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(ddev, dma_dst)) {
		dev_err(dev, "dma mapping failed\n");
		return -ENOMEM;
	}
	tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
				       len, flags);
	if (!tx) {
		dev_err(dev, "device_prep_dma_memcpy error\n");
		ret = -EIO;
		goto err_unmap;
	}

	tx->callback = cqspi_rx_dma_callback;
	tx->callback_param = cqspi;
	cookie = tx->tx_submit(tx);
	reinit_completion(&cqspi->rx_dma_complete);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "dma_submit_error %d\n", cookie);
		ret = -EIO;
		goto err_unmap;
	}

	dma_async_issue_pending(cqspi->rx_chan);
	if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
					 msecs_to_jiffies(max_t(size_t, len, 500)))) {
		dmaengine_terminate_sync(cqspi->rx_chan);
		dev_err(dev, "DMA wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
		goto err_unmap;
	}

err_unmap:
	dma_unmap_single(ddev, dma_dst, len, DMA_FROM_DEVICE);

	return ret;
}

static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
			  const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	const struct cqspi_driver_platdata *ddata;
	loff_t from = op->addr.val;
	size_t len = op->data.nbytes;
	u_char *buf = op->data.buf.in;
	u64 dma_align = (u64)(uintptr_t)buf;
	int ret;

	ddata = of_device_get_match_data(dev);

	ret = cqspi_read_setup(f_pdata, op);
	if (ret)
		return ret;

	if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size))
		return cqspi_direct_read_execute(f_pdata, buf, from, len);

	if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma &&
	    virt_addr_valid(buf) && ((dma_align & CQSPI_DMA_UNALIGN) == 0))
		return ddata->indirect_read_dma(f_pdata, buf, from, len);

	return cqspi_indirect_read_execute(f_pdata, buf, from, len);
}

static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
	struct cqspi_flash_pdata *f_pdata;

	f_pdata = &cqspi->f_pdata[mem->spi->chip_select];
	cqspi_configure(f_pdata, mem->spi->max_speed_hz);

	if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
		if (!op->addr.nbytes)
			return cqspi_command_read(f_pdata, op);

		return cqspi_read(f_pdata, op);
	}

	if (!op->addr.nbytes || !op->data.buf.out)
		return cqspi_command_write(f_pdata, op);

	return cqspi_write(f_pdata, op);
}

static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	int ret;

	ret = cqspi_mem_process(mem, op);
	if (ret)
		dev_err(&mem->spi->dev, "operation failed with %d\n", ret);

	return ret;
}

static bool cqspi_supports_mem_op(struct spi_mem *mem,
				  const struct spi_mem_op *op)
{
	bool all_true, all_false;

	/*
	 * op->dummy.dtr is required for converting nbytes into ncycles.
	 * Also, don't check the dtr field of the op phase having zero nbytes.
	 */
	all_true = op->cmd.dtr &&
		   (!op->addr.nbytes || op->addr.dtr) &&
		   (!op->dummy.nbytes || op->dummy.dtr) &&
		   (!op->data.nbytes || op->data.dtr);

	all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
		    !op->data.dtr;

	if (all_true) {
		/* Right now we only support 8-8-8 DTR mode. */
		if (op->cmd.nbytes && op->cmd.buswidth != 8)
			return false;
		if (op->addr.nbytes && op->addr.buswidth != 8)
			return false;
		if (op->data.nbytes && op->data.buswidth != 8)
			return false;
	} else if (!all_false) {
		/* Mixed DTR modes are not supported. */
		return false;
	}

	return spi_mem_default_supports_op(mem, op);
}

static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
				    struct cqspi_flash_pdata *f_pdata,
				    struct device_node *np)
{
	if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
		dev_err(&pdev->dev, "couldn't determine read-delay\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
		dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
		dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
		dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
		dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
		dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
		return -ENXIO;
	}

	return 0;
}

static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
{
	struct device *dev = &cqspi->pdev->dev;
	struct device_node *np = dev->of_node;
	u32 id[2];

	cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");

	if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
		dev_err(dev, "couldn't determine fifo-depth\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
		dev_err(dev, "couldn't determine fifo-width\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,trigger-address",
				 &cqspi->trigger_address)) {
		dev_err(dev, "couldn't determine trigger-address\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "num-cs", &cqspi->num_chipselect))
		cqspi->num_chipselect = CQSPI_MAX_CHIPSELECT;

	cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");

	if (!of_property_read_u32_array(np, "power-domains", id,
					ARRAY_SIZE(id)))
		cqspi->pd_dev_id = id[1];

	return 0;
}

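/*
 * One-time controller setup: no address remap, all interrupts masked,
 * SRAM split 1:1 between read and write, indirect trigger address and
 * FIFO watermarks programmed, and the direct access controller and DMA
 * interface enabled or disabled to match the chosen transfer modes.
 */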
static void cqspi_controller_init(struct cqspi_st *cqspi)
{
	u32 reg;

	cqspi_controller_enable(cqspi, 0);

	/* Configure the remap address register, no remap */
	writel(0, cqspi->iobase + CQSPI_REG_REMAP);

	/* Disable all interrupts. */
	writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);

	/* Configure the SRAM split to 1:1. */
	writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);

	/* Load indirect trigger address. */
	writel(cqspi->trigger_address,
	       cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);

	/* Program read watermark -- 1/2 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
	       cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
	/* Program write watermark -- 1/8 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
	       cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);

	/* Disable direct access controller */
	if (!cqspi->use_direct_mode) {
		reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
		reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
		writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	}

	/* Enable DMA interface */
	if (cqspi->use_dma_read) {
		reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
		reg |= CQSPI_REG_CONFIG_DMA_MASK;
		writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	}

	cqspi_controller_enable(cqspi, 1);
}

static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	cqspi->rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(cqspi->rx_chan)) {
		int ret = PTR_ERR(cqspi->rx_chan);

		cqspi->rx_chan = NULL;
		return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
	}
	init_completion(&cqspi->rx_dma_complete);

	return 0;
}

static const char *cqspi_get_name(struct spi_mem *mem)
{
	struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
	struct device *dev = &cqspi->pdev->dev;

	return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev), mem->spi->chip_select);
}

static const struct spi_controller_mem_ops cqspi_mem_ops = {
	.exec_op = cqspi_exec_mem_op,
	.get_name = cqspi_get_name,
	.supports_op = cqspi_supports_mem_op,
};

static const struct spi_controller_mem_caps cqspi_mem_caps = {
	.dtr = true,
};

static int cqspi_setup_flash(struct cqspi_st *cqspi)
{
	struct platform_device *pdev = cqspi->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct cqspi_flash_pdata *f_pdata;
	unsigned int cs;
	int ret;

	/* Get flash device data */
	for_each_available_child_of_node(dev->of_node, np) {
		ret = of_property_read_u32(np, "reg", &cs);
		if (ret) {
			dev_err(dev, "Couldn't determine chip select.\n");
			of_node_put(np);
			return ret;
		}

		if (cs >= CQSPI_MAX_CHIPSELECT) {
			dev_err(dev, "Chip select %d out of range.\n", cs);
			of_node_put(np);
			return -EINVAL;
		}

		f_pdata = &cqspi->f_pdata[cs];
		f_pdata->cqspi = cqspi;
		f_pdata->cs = cs;

		ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
		if (ret) {
			of_node_put(np);
			return ret;
		}
	}

	return 0;
}

static int cqspi_probe(struct platform_device *pdev)
{
	const struct cqspi_driver_platdata *ddata;
	struct reset_control *rstc, *rstc_ocp;
	struct device *dev = &pdev->dev;
	struct spi_master *master;
	struct resource *res_ahb;
	struct cqspi_st *cqspi;
	struct resource *res;
	int ret;
	int irq;

	master = devm_spi_alloc_master(&pdev->dev, sizeof(*cqspi));
	if (!master) {
		dev_err(&pdev->dev, "spi_alloc_master failed\n");
		return -ENOMEM;
	}
	master->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL;
	master->mem_ops = &cqspi_mem_ops;
	master->mem_caps = &cqspi_mem_caps;
	master->dev.of_node = pdev->dev.of_node;

	cqspi = spi_master_get_devdata(master);

	cqspi->pdev = pdev;
	cqspi->master = master;
	platform_set_drvdata(pdev, cqspi);

	/* Obtain configuration from OF. */
	ret = cqspi_of_get_pdata(cqspi);
	if (ret) {
		dev_err(dev, "Cannot get mandatory OF data.\n");
		ret = -ENODEV;
		goto probe_master_put;
	}

	/* Obtain QSPI clock. */
	cqspi->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(cqspi->clk)) {
		dev_err(dev, "Cannot claim QSPI clock.\n");
		ret = PTR_ERR(cqspi->clk);
		goto probe_master_put;
	}

	/* Obtain and remap controller address. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	cqspi->iobase = devm_ioremap_resource(dev, res);
	if (IS_ERR(cqspi->iobase)) {
		dev_err(dev, "Cannot remap controller address.\n");
		ret = PTR_ERR(cqspi->iobase);
		goto probe_master_put;
	}

	/* Obtain and remap AHB address. */
	res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
	if (IS_ERR(cqspi->ahb_base)) {
		dev_err(dev, "Cannot remap AHB address.\n");
		ret = PTR_ERR(cqspi->ahb_base);
		goto probe_master_put;
	}
	cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
	cqspi->ahb_size = resource_size(res_ahb);

	init_completion(&cqspi->transfer_complete);

	/* Obtain IRQ line. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto probe_master_put;
	}

	pm_runtime_enable(dev);
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		goto probe_master_put;

	ret = clk_prepare_enable(cqspi->clk);
	if (ret) {
		dev_err(dev, "Cannot enable QSPI clock.\n");
		goto probe_clk_failed;
	}

	/* Obtain QSPI reset control */
	rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
	if (IS_ERR(rstc)) {
		ret = PTR_ERR(rstc);
		dev_err(dev, "Cannot get QSPI reset.\n");
		goto probe_reset_failed;
	}

	rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
	if (IS_ERR(rstc_ocp)) {
		ret = PTR_ERR(rstc_ocp);
		dev_err(dev, "Cannot get QSPI OCP reset.\n");
		goto probe_reset_failed;
	}

	reset_control_assert(rstc);
	reset_control_deassert(rstc);

	reset_control_assert(rstc_ocp);
	reset_control_deassert(rstc_ocp);

	cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
	master->max_speed_hz = cqspi->master_ref_clk_hz;

	/* write completion is supported by default */
	cqspi->wr_completion = true;

	ddata = of_device_get_match_data(dev);
	if (ddata) {
		if (ddata->quirks & CQSPI_NEEDS_WR_DELAY)
			cqspi->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC,
						cqspi->master_ref_clk_hz);
		if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
			master->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
		if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE))
			cqspi->use_direct_mode = true;
		if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA)
			cqspi->use_dma_read = true;
		if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
			cqspi->wr_completion = false;

		if (of_device_is_compatible(pdev->dev.of_node,
					    "xlnx,versal-ospi-1.0"))
			dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	}

	ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
			       pdev->name, cqspi);
	if (ret) {
		dev_err(dev, "Cannot request IRQ.\n");
		goto probe_reset_failed;
	}

	cqspi_wait_idle(cqspi);
	cqspi_controller_init(cqspi);
	cqspi->current_cs = -1;
	cqspi->sclk = 0;

	master->num_chipselect = cqspi->num_chipselect;

	ret = cqspi_setup_flash(cqspi);
	if (ret) {
		dev_err(dev, "failed to setup flash parameters %d\n", ret);
		goto probe_setup_failed;
	}

	if (cqspi->use_direct_mode) {
		ret = cqspi_request_mmap_dma(cqspi);
		if (ret == -EPROBE_DEFER)
			goto probe_setup_failed;
	}

	ret = spi_register_master(master);
	if (ret) {
		dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret);
		goto probe_setup_failed;
	}

	return 0;
probe_setup_failed:
	cqspi_controller_enable(cqspi, 0);
probe_reset_failed:
	clk_disable_unprepare(cqspi->clk);
probe_clk_failed:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
probe_master_put:
	spi_master_put(master);
	return ret;
}

static int cqspi_remove(struct platform_device *pdev)
{
	struct cqspi_st *cqspi = platform_get_drvdata(pdev);

	spi_unregister_master(cqspi->master);
	cqspi_controller_enable(cqspi, 0);

	if (cqspi->rx_chan)
		dma_release_channel(cqspi->rx_chan);

	clk_disable_unprepare(cqspi->clk);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int cqspi_suspend(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);

	cqspi_controller_enable(cqspi, 0);
	return 0;
}

static int cqspi_resume(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);

	cqspi_controller_enable(cqspi, 1);
	return 0;
}

static const struct dev_pm_ops cqspi__dev_pm_ops = {
	.suspend = cqspi_suspend,
	.resume = cqspi_resume,
};

#define CQSPI_DEV_PM_OPS	(&cqspi__dev_pm_ops)
#else
#define CQSPI_DEV_PM_OPS	NULL
#endif

static const struct cqspi_driver_platdata cdns_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata k2g_qspi = {
	.quirks = CQSPI_NEEDS_WR_DELAY,
};

static const struct cqspi_driver_platdata am654_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_NEEDS_WR_DELAY,
};

static const struct cqspi_driver_platdata intel_lgm_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata socfpga_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION,
};

static const struct cqspi_driver_platdata versal_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA,
	.indirect_read_dma = cqspi_versal_indirect_read_dma,
	.get_dma_status = cqspi_get_versal_dma_status,
};

static const struct of_device_id cqspi_dt_ids[] = {
	{
		.compatible = "cdns,qspi-nor",
		.data = &cdns_qspi,
	},
	{
		.compatible = "ti,k2g-qspi",
		.data = &k2g_qspi,
	},
	{
		.compatible = "ti,am654-ospi",
		.data = &am654_ospi,
	},
	{
		.compatible = "intel,lgm-qspi",
		.data = &intel_lgm_qspi,
	},
	{
		.compatible = "xlnx,versal-ospi-1.0",
		.data = &versal_ospi,
	},
	{
		.compatible = "intel,socfpga-qspi",
		.data = &socfpga_qspi,
	},
	{ /* end of table */ }
};

MODULE_DEVICE_TABLE(of, cqspi_dt_ids);

static struct platform_driver cqspi_platform_driver = {
	.probe = cqspi_probe,
	.remove = cqspi_remove,
	.driver = {
		.name = CQSPI_NAME,
		.pm = CQSPI_DEV_PM_OPS,
		.of_match_table = cqspi_dt_ids,
	},
};

module_platform_driver(cqspi_platform_driver);

MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" CQSPI_NAME);
MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>");
MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
MODULE_AUTHOR("Pratyush Yadav <p.yadav@ti.com>");