cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

spi-tegra210-quad.c (45860B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2//
      3// Copyright (C) 2020 NVIDIA CORPORATION.
      4
      5#include <linux/clk.h>
      6#include <linux/completion.h>
      7#include <linux/delay.h>
      8#include <linux/dmaengine.h>
      9#include <linux/dma-mapping.h>
     10#include <linux/dmapool.h>
     11#include <linux/err.h>
     12#include <linux/interrupt.h>
     13#include <linux/io.h>
     14#include <linux/iopoll.h>
     15#include <linux/kernel.h>
     16#include <linux/kthread.h>
     17#include <linux/module.h>
     18#include <linux/platform_device.h>
     19#include <linux/pm_runtime.h>
     20#include <linux/of.h>
     21#include <linux/of_device.h>
     22#include <linux/reset.h>
     23#include <linux/spi/spi.h>
     24#include <linux/acpi.h>
     25#include <linux/property.h>
     26
     27#define QSPI_COMMAND1				0x000
     28#define QSPI_BIT_LENGTH(x)			(((x) & 0x1f) << 0)
     29#define QSPI_PACKED				BIT(5)
     30#define QSPI_INTERFACE_WIDTH_MASK		(0x03 << 7)
     31#define QSPI_INTERFACE_WIDTH(x)			(((x) & 0x03) << 7)
     32#define QSPI_INTERFACE_WIDTH_SINGLE		QSPI_INTERFACE_WIDTH(0)
     33#define QSPI_INTERFACE_WIDTH_DUAL		QSPI_INTERFACE_WIDTH(1)
     34#define QSPI_INTERFACE_WIDTH_QUAD		QSPI_INTERFACE_WIDTH(2)
     35#define QSPI_SDR_DDR_SEL			BIT(9)
     36#define QSPI_TX_EN				BIT(11)
     37#define QSPI_RX_EN				BIT(12)
     38#define QSPI_CS_SW_VAL				BIT(20)
     39#define QSPI_CS_SW_HW				BIT(21)
     40#define QSPI_CONTROL_MODE_0			(0 << 28)
     41#define QSPI_CONTROL_MODE_3			(3 << 28)
     42#define QSPI_CONTROL_MODE_MASK			(3 << 28)
     43#define QSPI_M_S				BIT(30)
     44#define QSPI_PIO				BIT(31)
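        /*
         * Illustrative COMMAND1 encoding (not from the original source): an
         * 8-bit-per-word, mode 0, TX-only PIO operation would be programmed
         * as QSPI_BIT_LENGTH(7) | QSPI_TX_EN | QSPI_CONTROL_MODE_0 | QSPI_PIO.
         */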
     45
     46#define QSPI_COMMAND2				0x004
     47#define QSPI_TX_TAP_DELAY(x)			(((x) & 0x3f) << 10)
     48#define QSPI_RX_TAP_DELAY(x)			(((x) & 0xff) << 0)
     49
     50#define QSPI_CS_TIMING1				0x008
     51#define QSPI_SETUP_HOLD(setup, hold)		(((setup) << 4) | (hold))
     52
     53#define QSPI_CS_TIMING2				0x00c
     54#define CYCLES_BETWEEN_PACKETS_0(x)		(((x) & 0x1f) << 0)
     55#define CS_ACTIVE_BETWEEN_PACKETS_0		BIT(5)
     56
     57#define QSPI_TRANS_STATUS			0x010
     58#define QSPI_BLK_CNT(val)			(((val) >> 0) & 0xffff)
     59#define QSPI_RDY				BIT(30)
     60
     61#define QSPI_FIFO_STATUS			0x014
     62#define QSPI_RX_FIFO_EMPTY			BIT(0)
     63#define QSPI_RX_FIFO_FULL			BIT(1)
     64#define QSPI_TX_FIFO_EMPTY			BIT(2)
     65#define QSPI_TX_FIFO_FULL			BIT(3)
     66#define QSPI_RX_FIFO_UNF			BIT(4)
     67#define QSPI_RX_FIFO_OVF			BIT(5)
     68#define QSPI_TX_FIFO_UNF			BIT(6)
     69#define QSPI_TX_FIFO_OVF			BIT(7)
     70#define QSPI_ERR				BIT(8)
     71#define QSPI_TX_FIFO_FLUSH			BIT(14)
     72#define QSPI_RX_FIFO_FLUSH			BIT(15)
     73#define QSPI_TX_FIFO_EMPTY_COUNT(val)		(((val) >> 16) & 0x7f)
     74#define QSPI_RX_FIFO_FULL_COUNT(val)		(((val) >> 23) & 0x7f)
     75
     76#define QSPI_FIFO_ERROR				(QSPI_RX_FIFO_UNF | \
     77						 QSPI_RX_FIFO_OVF | \
     78						 QSPI_TX_FIFO_UNF | \
     79						 QSPI_TX_FIFO_OVF)
     80#define QSPI_FIFO_EMPTY				(QSPI_RX_FIFO_EMPTY | \
     81						 QSPI_TX_FIFO_EMPTY)
     82
     83#define QSPI_TX_DATA				0x018
     84#define QSPI_RX_DATA				0x01c
     85
     86#define QSPI_DMA_CTL				0x020
     87#define QSPI_TX_TRIG(n)				(((n) & 0x3) << 15)
     88#define QSPI_TX_TRIG_1				QSPI_TX_TRIG(0)
     89#define QSPI_TX_TRIG_4				QSPI_TX_TRIG(1)
     90#define QSPI_TX_TRIG_8				QSPI_TX_TRIG(2)
     91#define QSPI_TX_TRIG_16				QSPI_TX_TRIG(3)
     92
     93#define QSPI_RX_TRIG(n)				(((n) & 0x3) << 19)
     94#define QSPI_RX_TRIG_1				QSPI_RX_TRIG(0)
     95#define QSPI_RX_TRIG_4				QSPI_RX_TRIG(1)
     96#define QSPI_RX_TRIG_8				QSPI_RX_TRIG(2)
     97#define QSPI_RX_TRIG_16				QSPI_RX_TRIG(3)
     98
     99#define QSPI_DMA_EN				BIT(31)
    100
    101#define QSPI_DMA_BLK				0x024
    102#define QSPI_DMA_BLK_SET(x)			(((x) & 0xffff) << 0)
    103
    104#define QSPI_TX_FIFO				0x108
    105#define QSPI_RX_FIFO				0x188
    106
    107#define QSPI_FIFO_DEPTH				64
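        /*
         * The FIFOs are QSPI_FIFO_DEPTH (64) words of 32 bits, i.e. 256 bytes.
         * Transfers that fit are done in PIO mode; anything larger goes
         * through DMA when available (see tegra_qspi_start_transfer_one()).
         */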
    108
    109#define QSPI_INTR_MASK				0x18c
    110#define QSPI_INTR_RX_FIFO_UNF_MASK		BIT(25)
    111#define QSPI_INTR_RX_FIFO_OVF_MASK		BIT(26)
    112#define QSPI_INTR_TX_FIFO_UNF_MASK		BIT(27)
    113#define QSPI_INTR_TX_FIFO_OVF_MASK		BIT(28)
    114#define QSPI_INTR_RDY_MASK			BIT(29)
    115#define QSPI_INTR_RX_TX_FIFO_ERR		(QSPI_INTR_RX_FIFO_UNF_MASK | \
    116						 QSPI_INTR_RX_FIFO_OVF_MASK | \
    117						 QSPI_INTR_TX_FIFO_UNF_MASK | \
    118						 QSPI_INTR_TX_FIFO_OVF_MASK)
    119
     120#define QSPI_MISC_REG				0x194
    121#define QSPI_NUM_DUMMY_CYCLE(x)			(((x) & 0xff) << 0)
    122#define QSPI_DUMMY_CYCLES_MAX			0xff
    123
    124#define QSPI_CMB_SEQ_CMD			0x19c
     125#define QSPI_COMMAND_VALUE_SET(x)		(((x) & 0xFF) << 0)
    126
    127#define QSPI_CMB_SEQ_CMD_CFG			0x1a0
    128#define QSPI_COMMAND_X1_X2_X4(x)		(((x) & 0x3) << 13)
    129#define QSPI_COMMAND_X1_X2_X4_MASK		(0x03 << 13)
    130#define QSPI_COMMAND_SDR_DDR			BIT(12)
    131#define QSPI_COMMAND_SIZE_SET(x)		(((x) & 0xFF) << 0)
    132
     133#define QSPI_GLOBAL_CONFIG			0x1a4
    134#define QSPI_CMB_SEQ_EN				BIT(0)
    135
    136#define QSPI_CMB_SEQ_ADDR			0x1a8
     137#define QSPI_ADDRESS_VALUE_SET(x)		(((x) & 0xFFFF) << 0)
    138
    139#define QSPI_CMB_SEQ_ADDR_CFG			0x1ac
    140#define QSPI_ADDRESS_X1_X2_X4(x)		(((x) & 0x3) << 13)
    141#define QSPI_ADDRESS_X1_X2_X4_MASK		(0x03 << 13)
    142#define QSPI_ADDRESS_SDR_DDR			BIT(12)
    143#define QSPI_ADDRESS_SIZE_SET(x)		(((x) & 0xFF) << 0)
    144
    145#define DATA_DIR_TX				BIT(0)
    146#define DATA_DIR_RX				BIT(1)
    147
    148#define QSPI_DMA_TIMEOUT			(msecs_to_jiffies(1000))
    149#define DEFAULT_QSPI_DMA_BUF_LEN		(64 * 1024)
    150#define CMD_TRANSFER				0
    151#define ADDR_TRANSFER				1
    152#define DATA_TRANSFER				2
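        /*
         * Combined sequence transfers consist of exactly three phases, in
         * this order: command, address, data (see
         * tegra_qspi_validate_cmb_seq() and tegra_qspi_combined_seq_xfer()).
         */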
    153
    154struct tegra_qspi_soc_data {
    155	bool has_dma;
    156	bool cmb_xfer_capable;
    157};
    158
    159struct tegra_qspi_client_data {
    160	int tx_clk_tap_delay;
    161	int rx_clk_tap_delay;
    162};
    163
    164struct tegra_qspi {
    165	struct device				*dev;
    166	struct spi_master			*master;
    167	/* lock to protect data accessed by irq */
    168	spinlock_t				lock;
    169
    170	struct clk				*clk;
    171	void __iomem				*base;
    172	phys_addr_t				phys;
    173	unsigned int				irq;
    174
    175	u32					cur_speed;
    176	unsigned int				cur_pos;
    177	unsigned int				words_per_32bit;
    178	unsigned int				bytes_per_word;
    179	unsigned int				curr_dma_words;
    180	unsigned int				cur_direction;
    181
    182	unsigned int				cur_rx_pos;
    183	unsigned int				cur_tx_pos;
    184
    185	unsigned int				dma_buf_size;
    186	unsigned int				max_buf_size;
    187	bool					is_curr_dma_xfer;
    188
    189	struct completion			rx_dma_complete;
    190	struct completion			tx_dma_complete;
    191
    192	u32					tx_status;
    193	u32					rx_status;
    194	u32					status_reg;
    195	bool					is_packed;
    196	bool					use_dma;
    197
    198	u32					command1_reg;
    199	u32					dma_control_reg;
    200	u32					def_command1_reg;
    201	u32					def_command2_reg;
    202	u32					spi_cs_timing1;
    203	u32					spi_cs_timing2;
    204	u8					dummy_cycles;
    205
    206	struct completion			xfer_completion;
    207	struct spi_transfer			*curr_xfer;
    208
    209	struct dma_chan				*rx_dma_chan;
    210	u32					*rx_dma_buf;
    211	dma_addr_t				rx_dma_phys;
    212	struct dma_async_tx_descriptor		*rx_dma_desc;
    213
    214	struct dma_chan				*tx_dma_chan;
    215	u32					*tx_dma_buf;
    216	dma_addr_t				tx_dma_phys;
    217	struct dma_async_tx_descriptor		*tx_dma_desc;
    218	const struct tegra_qspi_soc_data	*soc_data;
    219};
    220
    221static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
    222{
    223	return readl(tqspi->base + offset);
    224}
    225
    226static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset)
    227{
    228	writel(value, tqspi->base + offset);
    229
    230	/* read back register to make sure that register writes completed */
    231	if (offset != QSPI_TX_FIFO)
    232		readl(tqspi->base + QSPI_COMMAND1);
    233}
    234
    235static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi)
    236{
    237	u32 value;
    238
    239	/* write 1 to clear status register */
    240	value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
    241	tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS);
    242
    243	value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
    244	if (!(value & QSPI_INTR_RDY_MASK)) {
    245		value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
    246		tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK);
    247	}
    248
    249	/* clear fifo status error if any */
    250	value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
    251	if (value & QSPI_ERR)
    252		tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS);
    253}
    254
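        /*
         * Worked example (illustrative): a 100-byte transfer at 8 bits per
         * word is packed (bpw is 8/16/32 and len > 3), so bytes_per_word = 1,
         * words_per_32bit = 4, curr_dma_words = 100, and the function returns
         * (100 + 3) / 4 = 25 FIFO words, which fits in the 64-word FIFO and
         * therefore takes the PIO path.
         */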
    255static unsigned int
    256tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t)
    257{
    258	unsigned int max_word, max_len, total_fifo_words;
    259	unsigned int remain_len = t->len - tqspi->cur_pos;
    260	unsigned int bits_per_word = t->bits_per_word;
    261
    262	tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
    263
     264	/*
     265	 * The Tegra QSPI controller supports packed and unpacked mode
     266	 * transfers. Packed mode is used for 8, 16 or 32 bits per word
     267	 * with a transfer length of at least four bytes; all other
     268	 * transfers use unpacked mode.
     269	 */
    270
    271	if ((bits_per_word == 8 || bits_per_word == 16 ||
    272	     bits_per_word == 32) && t->len > 3) {
    273		tqspi->is_packed = true;
    274		tqspi->words_per_32bit = 32 / bits_per_word;
    275	} else {
    276		tqspi->is_packed = false;
    277		tqspi->words_per_32bit = 1;
    278	}
    279
    280	if (tqspi->is_packed) {
    281		max_len = min(remain_len, tqspi->max_buf_size);
    282		tqspi->curr_dma_words = max_len / tqspi->bytes_per_word;
    283		total_fifo_words = (max_len + 3) / 4;
    284	} else {
    285		max_word = (remain_len - 1) / tqspi->bytes_per_word + 1;
    286		max_word = min(max_word, tqspi->max_buf_size / 4);
    287		tqspi->curr_dma_words = max_word;
    288		total_fifo_words = max_word;
    289	}
    290
    291	return total_fifo_words;
    292}
    293
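        /*
         * In packed mode, bytes are packed little-endian into each FIFO word:
         * buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24. Unpacked mode
         * writes one word (of bytes_per_word valid bytes) per FIFO entry.
         */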
    294static unsigned int
    295tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
    296{
    297	unsigned int written_words, fifo_words_left, count;
    298	unsigned int len, tx_empty_count, max_n_32bit, i;
    299	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
    300	u32 fifo_status;
    301
    302	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
    303	tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status);
    304
    305	if (tqspi->is_packed) {
    306		fifo_words_left = tx_empty_count * tqspi->words_per_32bit;
    307		written_words = min(fifo_words_left, tqspi->curr_dma_words);
    308		len = written_words * tqspi->bytes_per_word;
    309		max_n_32bit = DIV_ROUND_UP(len, 4);
    310		for (count = 0; count < max_n_32bit; count++) {
    311			u32 x = 0;
    312
    313			for (i = 0; (i < 4) && len; i++, len--)
    314				x |= (u32)(*tx_buf++) << (i * 8);
    315			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
    316		}
    317
    318		tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word;
    319	} else {
    320		unsigned int write_bytes;
    321		u8 bytes_per_word = tqspi->bytes_per_word;
    322
    323		max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count);
    324		written_words = max_n_32bit;
    325		len = written_words * tqspi->bytes_per_word;
    326		if (len > t->len - tqspi->cur_pos)
    327			len = t->len - tqspi->cur_pos;
    328		write_bytes = len;
    329		for (count = 0; count < max_n_32bit; count++) {
    330			u32 x = 0;
    331
    332			for (i = 0; len && (i < bytes_per_word); i++, len--)
    333				x |= (u32)(*tx_buf++) << (i * 8);
    334			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
    335		}
    336
    337		tqspi->cur_tx_pos += write_bytes;
    338	}
    339
    340	return written_words;
    341}
    342
    343static unsigned int
    344tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
    345{
    346	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
    347	unsigned int len, rx_full_count, count, i;
    348	unsigned int read_words = 0;
    349	u32 fifo_status, x;
    350
    351	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
    352	rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status);
    353	if (tqspi->is_packed) {
    354		len = tqspi->curr_dma_words * tqspi->bytes_per_word;
    355		for (count = 0; count < rx_full_count; count++) {
    356			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO);
    357
    358			for (i = 0; len && (i < 4); i++, len--)
    359				*rx_buf++ = (x >> i * 8) & 0xff;
    360		}
    361
    362		read_words += tqspi->curr_dma_words;
    363		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
    364	} else {
    365		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
    366		u8 bytes_per_word = tqspi->bytes_per_word;
    367		unsigned int read_bytes;
    368
    369		len = rx_full_count * bytes_per_word;
    370		if (len > t->len - tqspi->cur_pos)
    371			len = t->len - tqspi->cur_pos;
    372		read_bytes = len;
    373		for (count = 0; count < rx_full_count; count++) {
    374			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask;
    375
    376			for (i = 0; len && (i < bytes_per_word); i++, len--)
    377				*rx_buf++ = (x >> (i * 8)) & 0xff;
    378		}
    379
    380		read_words += rx_full_count;
    381		tqspi->cur_rx_pos += read_bytes;
    382	}
    383
    384	return read_words;
    385}
    386
    387static void
    388tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
    389{
    390	dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys,
    391				tqspi->dma_buf_size, DMA_TO_DEVICE);
    392
    393	/*
    394	 * In packed mode, each word in FIFO may contain multiple packets
    395	 * based on bits per word. So all bytes in each FIFO word are valid.
    396	 *
    397	 * In unpacked mode, each word in FIFO contains single packet and
    398	 * based on bits per word any remaining bits in FIFO word will be
    399	 * ignored by the hardware and are invalid bits.
    400	 */
    401	if (tqspi->is_packed) {
    402		tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
    403	} else {
    404		u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
    405		unsigned int i, count, consume, write_bytes;
    406
    407		/*
    408		 * Fill tx_dma_buf to contain single packet in each word based
    409		 * on bits per word from SPI core tx_buf.
    410		 */
    411		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
    412		if (consume > t->len - tqspi->cur_pos)
    413			consume = t->len - tqspi->cur_pos;
    414		write_bytes = consume;
    415		for (count = 0; count < tqspi->curr_dma_words; count++) {
    416			u32 x = 0;
    417
    418			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
    419				x |= (u32)(*tx_buf++) << (i * 8);
    420			tqspi->tx_dma_buf[count] = x;
    421		}
    422
    423		tqspi->cur_tx_pos += write_bytes;
    424	}
    425
    426	dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys,
    427				   tqspi->dma_buf_size, DMA_TO_DEVICE);
    428}
    429
    430static void
    431tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
    432{
    433	dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys,
    434				tqspi->dma_buf_size, DMA_FROM_DEVICE);
    435
    436	if (tqspi->is_packed) {
    437		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
    438	} else {
    439		unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos;
    440		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
    441		unsigned int i, count, consume, read_bytes;
    442
    443		/*
    444		 * Each FIFO word contains single data packet.
    445		 * Skip invalid bits in each FIFO word based on bits per word
    446		 * and align bytes while filling in SPI core rx_buf.
    447		 */
    448		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
    449		if (consume > t->len - tqspi->cur_pos)
    450			consume = t->len - tqspi->cur_pos;
    451		read_bytes = consume;
    452		for (count = 0; count < tqspi->curr_dma_words; count++) {
    453			u32 x = tqspi->rx_dma_buf[count] & rx_mask;
    454
    455			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
    456				*rx_buf++ = (x >> (i * 8)) & 0xff;
    457		}
    458
    459		tqspi->cur_rx_pos += read_bytes;
    460	}
    461
    462	dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
    463				   tqspi->dma_buf_size, DMA_FROM_DEVICE);
    464}
    465
    466static void tegra_qspi_dma_complete(void *args)
    467{
    468	struct completion *dma_complete = args;
    469
    470	complete(dma_complete);
    471}
    472
    473static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
    474{
    475	dma_addr_t tx_dma_phys;
    476
    477	reinit_completion(&tqspi->tx_dma_complete);
    478
    479	if (tqspi->is_packed)
    480		tx_dma_phys = t->tx_dma;
    481	else
    482		tx_dma_phys = tqspi->tx_dma_phys;
    483
    484	tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys,
    485							 len, DMA_MEM_TO_DEV,
     486							 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    487
    488	if (!tqspi->tx_dma_desc) {
    489		dev_err(tqspi->dev, "Unable to get TX descriptor\n");
    490		return -EIO;
    491	}
    492
    493	tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete;
    494	tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete;
    495	dmaengine_submit(tqspi->tx_dma_desc);
    496	dma_async_issue_pending(tqspi->tx_dma_chan);
    497
    498	return 0;
    499}
    500
    501static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
    502{
    503	dma_addr_t rx_dma_phys;
    504
    505	reinit_completion(&tqspi->rx_dma_complete);
    506
    507	if (tqspi->is_packed)
    508		rx_dma_phys = t->rx_dma;
    509	else
    510		rx_dma_phys = tqspi->rx_dma_phys;
    511
    512	tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys,
    513							 len, DMA_DEV_TO_MEM,
     514							 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    515
    516	if (!tqspi->rx_dma_desc) {
    517		dev_err(tqspi->dev, "Unable to get RX descriptor\n");
    518		return -EIO;
    519	}
    520
    521	tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete;
    522	tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete;
    523	dmaengine_submit(tqspi->rx_dma_desc);
    524	dma_async_issue_pending(tqspi->rx_dma_chan);
    525
    526	return 0;
    527}
    528
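        /*
         * Flushing sets the TX/RX flush bits and then polls FIFO_STATUS
         * (every 1000 us, for up to 1 s) until both FIFOs report empty. The
         * atomic variant is used by the error handler, which may run with
         * the driver lock held.
         */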
    529static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic)
    530{
    531	void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS;
    532	u32 val;
    533
    534	val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
    535	if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY)
    536		return 0;
    537
    538	val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH;
    539	tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS);
    540
    541	if (!atomic)
    542		return readl_relaxed_poll_timeout(addr, val,
    543						  (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
    544						  1000, 1000000);
    545
    546	return readl_relaxed_poll_timeout_atomic(addr, val,
    547						 (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
    548						 1000, 1000000);
    549}
    550
    551static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi)
    552{
    553	u32 intr_mask;
    554
    555	intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
    556	intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
    557	tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK);
    558}
    559
    560static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
    561{
    562	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
    563	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
    564	unsigned int len;
    565
    566	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
    567
    568	if (t->tx_buf) {
    569		t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE);
    570		if (dma_mapping_error(tqspi->dev, t->tx_dma))
    571			return -ENOMEM;
    572	}
    573
    574	if (t->rx_buf) {
    575		t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE);
    576		if (dma_mapping_error(tqspi->dev, t->rx_dma)) {
    577			dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
    578			return -ENOMEM;
    579		}
    580	}
    581
    582	return 0;
    583}
    584
    585static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
    586{
    587	unsigned int len;
    588
    589	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
    590
    591	dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
    592	dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
    593}
    594
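        /*
         * Illustrative trigger selection below: len = 10 is not a multiple
         * of 16 and uses single-word triggers (burst 1); len = 48 is an odd
         * multiple of 16 and uses 4-word triggers; len = 64 (and other
         * multiples of 32) uses 8-word triggers.
         */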
    595static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
    596{
    597	struct dma_slave_config dma_sconfig = { 0 };
    598	unsigned int len;
    599	u8 dma_burst;
    600	int ret = 0;
    601	u32 val;
    602
    603	if (tqspi->is_packed) {
    604		ret = tegra_qspi_dma_map_xfer(tqspi, t);
    605		if (ret < 0)
    606			return ret;
    607	}
    608
    609	val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
    610	tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);
    611
    612	tegra_qspi_unmask_irq(tqspi);
    613
    614	if (tqspi->is_packed)
    615		len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
    616	else
    617		len = tqspi->curr_dma_words * 4;
    618
    619	/* set attention level based on length of transfer */
    620	val = 0;
    621	if (len & 0xf) {
    622		val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
    623		dma_burst = 1;
    624	} else if (((len) >> 4) & 0x1) {
    625		val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
    626		dma_burst = 4;
    627	} else {
    628		val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
    629		dma_burst = 8;
    630	}
    631
    632	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
    633	tqspi->dma_control_reg = val;
    634
    635	dma_sconfig.device_fc = true;
    636	if (tqspi->cur_direction & DATA_DIR_TX) {
    637		dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
    638		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    639		dma_sconfig.dst_maxburst = dma_burst;
    640		ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
    641		if (ret < 0) {
    642			dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
    643			return ret;
    644		}
    645
    646		tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
    647		ret = tegra_qspi_start_tx_dma(tqspi, t, len);
    648		if (ret < 0) {
     649			dev_err(tqspi->dev, "failed to start TX DMA: %d\n", ret);
    650			return ret;
    651		}
    652	}
    653
    654	if (tqspi->cur_direction & DATA_DIR_RX) {
    655		dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
    656		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    657		dma_sconfig.src_maxburst = dma_burst;
    658		ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
    659		if (ret < 0) {
    660			dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
    661			return ret;
    662		}
    663
    664		dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
    665					   tqspi->dma_buf_size,
    666					   DMA_FROM_DEVICE);
    667
    668		ret = tegra_qspi_start_rx_dma(tqspi, t, len);
    669		if (ret < 0) {
    670			dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
    671			if (tqspi->cur_direction & DATA_DIR_TX)
    672				dmaengine_terminate_all(tqspi->tx_dma_chan);
    673			return ret;
    674		}
    675	}
    676
    677	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
    678
    679	tqspi->is_curr_dma_xfer = true;
    680	tqspi->dma_control_reg = val;
    681	val |= QSPI_DMA_EN;
    682	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
    683
    684	return ret;
    685}
    686
    687static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t)
    688{
    689	u32 val;
    690	unsigned int cur_words;
    691
    692	if (qspi->cur_direction & DATA_DIR_TX)
    693		cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t);
    694	else
    695		cur_words = qspi->curr_dma_words;
    696
    697	val = QSPI_DMA_BLK_SET(cur_words - 1);
    698	tegra_qspi_writel(qspi, val, QSPI_DMA_BLK);
    699
    700	tegra_qspi_unmask_irq(qspi);
    701
    702	qspi->is_curr_dma_xfer = false;
    703	val = qspi->command1_reg;
    704	val |= QSPI_PIO;
    705	tegra_qspi_writel(qspi, val, QSPI_COMMAND1);
    706
    707	return 0;
    708}
    709
    710static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
    711{
    712	if (tqspi->tx_dma_buf) {
    713		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
    714				  tqspi->tx_dma_buf, tqspi->tx_dma_phys);
    715		tqspi->tx_dma_buf = NULL;
    716	}
    717
    718	if (tqspi->tx_dma_chan) {
    719		dma_release_channel(tqspi->tx_dma_chan);
    720		tqspi->tx_dma_chan = NULL;
    721	}
    722
    723	if (tqspi->rx_dma_buf) {
    724		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
    725				  tqspi->rx_dma_buf, tqspi->rx_dma_phys);
    726		tqspi->rx_dma_buf = NULL;
    727	}
    728
    729	if (tqspi->rx_dma_chan) {
    730		dma_release_channel(tqspi->rx_dma_chan);
    731		tqspi->rx_dma_chan = NULL;
    732	}
    733}
    734
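        /*
         * DMA setup allocates the "rx" and "tx" dmaengine channels plus one
         * 64 KiB bounce buffer each (DEFAULT_QSPI_DMA_BUF_LEN). Any failure
         * other than -EPROBE_DEFER is not fatal: the driver logs it and
         * falls back to PIO.
         */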
    735static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
    736{
    737	struct dma_chan *dma_chan;
    738	dma_addr_t dma_phys;
    739	u32 *dma_buf;
    740	int err;
    741
    742	dma_chan = dma_request_chan(tqspi->dev, "rx");
    743	if (IS_ERR(dma_chan)) {
    744		err = PTR_ERR(dma_chan);
    745		goto err_out;
    746	}
    747
    748	tqspi->rx_dma_chan = dma_chan;
    749
    750	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
    751	if (!dma_buf) {
    752		err = -ENOMEM;
    753		goto err_out;
    754	}
    755
    756	tqspi->rx_dma_buf = dma_buf;
    757	tqspi->rx_dma_phys = dma_phys;
    758
    759	dma_chan = dma_request_chan(tqspi->dev, "tx");
    760	if (IS_ERR(dma_chan)) {
    761		err = PTR_ERR(dma_chan);
    762		goto err_out;
    763	}
    764
    765	tqspi->tx_dma_chan = dma_chan;
    766
    767	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
    768	if (!dma_buf) {
    769		err = -ENOMEM;
    770		goto err_out;
    771	}
    772
    773	tqspi->tx_dma_buf = dma_buf;
    774	tqspi->tx_dma_phys = dma_phys;
    775	tqspi->use_dma = true;
    776
    777	return 0;
    778
    779err_out:
    780	tegra_qspi_deinit_dma(tqspi);
    781
    782	if (err != -EPROBE_DEFER) {
    783		dev_err(tqspi->dev, "cannot use DMA: %d\n", err);
    784		dev_err(tqspi->dev, "falling back to PIO\n");
    785		return 0;
    786	}
    787
    788	return err;
    789}
    790
    791static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
    792					 bool is_first_of_msg)
    793{
    794	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
    795	struct tegra_qspi_client_data *cdata = spi->controller_data;
    796	u32 command1, command2, speed = t->speed_hz;
    797	u8 bits_per_word = t->bits_per_word;
    798	u32 tx_tap = 0, rx_tap = 0;
    799	int req_mode;
    800
    801	if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
    802		clk_set_rate(tqspi->clk, speed);
    803		tqspi->cur_speed = speed;
    804	}
    805
    806	tqspi->cur_pos = 0;
    807	tqspi->cur_rx_pos = 0;
    808	tqspi->cur_tx_pos = 0;
    809	tqspi->curr_xfer = t;
    810
    811	if (is_first_of_msg) {
    812		tegra_qspi_mask_clear_irq(tqspi);
    813
    814		command1 = tqspi->def_command1_reg;
    815		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
    816
    817		command1 &= ~QSPI_CONTROL_MODE_MASK;
    818		req_mode = spi->mode & 0x3;
    819		if (req_mode == SPI_MODE_3)
    820			command1 |= QSPI_CONTROL_MODE_3;
    821		else
    822			command1 |= QSPI_CONTROL_MODE_0;
    823
    824		if (spi->mode & SPI_CS_HIGH)
    825			command1 |= QSPI_CS_SW_VAL;
    826		else
    827			command1 &= ~QSPI_CS_SW_VAL;
    828		tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
    829
    830		if (cdata && cdata->tx_clk_tap_delay)
    831			tx_tap = cdata->tx_clk_tap_delay;
    832
    833		if (cdata && cdata->rx_clk_tap_delay)
    834			rx_tap = cdata->rx_clk_tap_delay;
    835
    836		command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap);
    837		if (command2 != tqspi->def_command2_reg)
    838			tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2);
    839
    840	} else {
    841		command1 = tqspi->command1_reg;
    842		command1 &= ~QSPI_BIT_LENGTH(~0);
    843		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
    844	}
    845
    846	command1 &= ~QSPI_SDR_DDR_SEL;
    847
    848	return command1;
    849}
    850
    851static int tegra_qspi_start_transfer_one(struct spi_device *spi,
    852					 struct spi_transfer *t, u32 command1)
    853{
    854	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
    855	unsigned int total_fifo_words;
    856	u8 bus_width = 0;
    857	int ret;
    858
    859	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
    860
    861	command1 &= ~QSPI_PACKED;
    862	if (tqspi->is_packed)
    863		command1 |= QSPI_PACKED;
    864	tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
    865
    866	tqspi->cur_direction = 0;
    867
    868	command1 &= ~(QSPI_TX_EN | QSPI_RX_EN);
    869	if (t->rx_buf) {
    870		command1 |= QSPI_RX_EN;
    871		tqspi->cur_direction |= DATA_DIR_RX;
    872		bus_width = t->rx_nbits;
    873	}
    874
    875	if (t->tx_buf) {
    876		command1 |= QSPI_TX_EN;
    877		tqspi->cur_direction |= DATA_DIR_TX;
    878		bus_width = t->tx_nbits;
    879	}
    880
    881	command1 &= ~QSPI_INTERFACE_WIDTH_MASK;
    882
    883	if (bus_width == SPI_NBITS_QUAD)
    884		command1 |= QSPI_INTERFACE_WIDTH_QUAD;
    885	else if (bus_width == SPI_NBITS_DUAL)
    886		command1 |= QSPI_INTERFACE_WIDTH_DUAL;
    887	else
    888		command1 |= QSPI_INTERFACE_WIDTH_SINGLE;
    889
    890	tqspi->command1_reg = command1;
    891
    892	tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG);
    893
    894	ret = tegra_qspi_flush_fifos(tqspi, false);
    895	if (ret < 0)
    896		return ret;
    897
    898	if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH)
    899		ret = tegra_qspi_start_dma_based_transfer(tqspi, t);
    900	else
    901		ret = tegra_qspi_start_cpu_based_transfer(tqspi, t);
    902
    903	return ret;
    904}
    905
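        /*
         * Hypothetical device-tree fragment for the properties parsed below
         * (the delay values are made up for illustration):
         *
         *     flash@0 {
         *             compatible = "jedec,spi-nor";
         *             reg = <0>;
         *             nvidia,tx-clk-tap-delay = <0>;
         *             nvidia,rx-clk-tap-delay = <6>;
         *     };
         */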
    906static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
    907{
    908	struct tegra_qspi_client_data *cdata;
    909
    910	cdata = devm_kzalloc(&spi->dev, sizeof(*cdata), GFP_KERNEL);
    911	if (!cdata)
    912		return NULL;
    913
    914	device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay",
    915				 &cdata->tx_clk_tap_delay);
    916	device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay",
    917				 &cdata->rx_clk_tap_delay);
    918
    919	return cdata;
    920}
    921
    922static int tegra_qspi_setup(struct spi_device *spi)
    923{
    924	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
    925	struct tegra_qspi_client_data *cdata = spi->controller_data;
    926	unsigned long flags;
    927	u32 val;
    928	int ret;
    929
    930	ret = pm_runtime_resume_and_get(tqspi->dev);
    931	if (ret < 0) {
    932		dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret);
    933		return ret;
    934	}
    935
    936	if (!cdata) {
    937		cdata = tegra_qspi_parse_cdata_dt(spi);
    938		spi->controller_data = cdata;
    939	}
    940	spin_lock_irqsave(&tqspi->lock, flags);
    941
     942	/* keep the default CS state inactive */
    943	val = tqspi->def_command1_reg;
    944	if (spi->mode & SPI_CS_HIGH)
    945		val &= ~QSPI_CS_SW_VAL;
    946	else
    947		val |= QSPI_CS_SW_VAL;
    948
    949	tqspi->def_command1_reg = val;
    950	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
    951
    952	spin_unlock_irqrestore(&tqspi->lock, flags);
    953
    954	pm_runtime_put(tqspi->dev);
    955
    956	return 0;
    957}
    958
    959static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
    960{
    961	dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n");
    962	dev_dbg(tqspi->dev, "Command1:    0x%08x | Command2:    0x%08x\n",
    963		tegra_qspi_readl(tqspi, QSPI_COMMAND1),
    964		tegra_qspi_readl(tqspi, QSPI_COMMAND2));
    965	dev_dbg(tqspi->dev, "DMA_CTL:     0x%08x | DMA_BLK:     0x%08x\n",
    966		tegra_qspi_readl(tqspi, QSPI_DMA_CTL),
    967		tegra_qspi_readl(tqspi, QSPI_DMA_BLK));
     968	dev_dbg(tqspi->dev, "INTR_MASK:   0x%08x | MISC:        0x%08x\n",
    969		tegra_qspi_readl(tqspi, QSPI_INTR_MASK),
    970		tegra_qspi_readl(tqspi, QSPI_MISC_REG));
    971	dev_dbg(tqspi->dev, "TRANS_STAT:  0x%08x | FIFO_STATUS: 0x%08x\n",
    972		tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
    973		tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
    974}
    975
    976static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
    977{
    978	dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
    979	tegra_qspi_dump_regs(tqspi);
    980	tegra_qspi_flush_fifos(tqspi, true);
    981	if (device_reset(tqspi->dev) < 0)
    982		dev_warn_once(tqspi->dev, "device reset failed\n");
    983}
    984
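        /*
         * De-asserting CS writes the updated command1_reg first and then
         * restores def_command1_reg, which holds the inactive CS level
         * programmed in tegra_qspi_setup().
         */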
    985static void tegra_qspi_transfer_end(struct spi_device *spi)
    986{
    987	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
    988	int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
    989
    990	if (cs_val)
    991		tqspi->command1_reg |= QSPI_CS_SW_VAL;
    992	else
    993		tqspi->command1_reg &= ~QSPI_CS_SW_VAL;
    994	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
    995	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
    996}
    997
    998static u32 tegra_qspi_cmd_config(bool is_ddr, u8 bus_width, u8 len)
    999{
   1000	u32 cmd_config = 0;
   1001
    1002	/* Build the command configuration */
   1003	if (is_ddr)
   1004		cmd_config |= QSPI_COMMAND_SDR_DDR;
   1005	else
   1006		cmd_config &= ~QSPI_COMMAND_SDR_DDR;
   1007
   1008	cmd_config |= QSPI_COMMAND_X1_X2_X4(bus_width);
   1009	cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1);
   1010
   1011	return cmd_config;
   1012}
   1013
   1014static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
   1015{
   1016	u32 addr_config = 0;
   1017
    1018	/* Build the address configuration; only X1 SDR mode is supported */
    1019	is_ddr = 0;
    1020	bus_width = 0;
   1021
   1022	if (is_ddr)
   1023		addr_config |= QSPI_ADDRESS_SDR_DDR;
   1024	else
   1025		addr_config &= ~QSPI_ADDRESS_SDR_DDR;
   1026
   1027	addr_config |= QSPI_ADDRESS_X1_X2_X4(bus_width);
   1028	addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1);
   1029
   1030	return addr_config;
   1031}
   1032
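        /*
         * Combined sequence: the command and address transfers only latch
         * their opcode/address bytes and configuration words; all registers
         * are programmed and the hardware is started during the data phase.
         */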
   1033static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
   1034					struct spi_message *msg)
   1035{
   1036	bool is_first_msg = true;
   1037	struct spi_transfer *xfer;
   1038	struct spi_device *spi = msg->spi;
   1039	u8 transfer_phase = 0;
   1040	u32 cmd1 = 0, dma_ctl = 0;
   1041	int ret = 0;
   1042	u32 address_value = 0;
   1043	u32 cmd_config = 0, addr_config = 0;
   1044	u8 cmd_value = 0, val = 0;
   1045
   1046	/* Enable Combined sequence mode */
   1047	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
   1048	val |= QSPI_CMB_SEQ_EN;
   1049	tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
   1050	/* Process individual transfer list */
   1051	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
   1052		switch (transfer_phase) {
   1053		case CMD_TRANSFER:
   1054			/* X1 SDR mode */
   1055			cmd_config = tegra_qspi_cmd_config(false, 0,
   1056							   xfer->len);
   1057			cmd_value = *((const u8 *)(xfer->tx_buf));
   1058			break;
   1059		case ADDR_TRANSFER:
   1060			/* X1 SDR mode */
   1061			addr_config = tegra_qspi_addr_config(false, 0,
   1062							     xfer->len);
   1063			address_value = *((const u32 *)(xfer->tx_buf));
   1064			break;
   1065		case DATA_TRANSFER:
   1066			/* Program Command, Address value in register */
   1067			tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
   1068			tegra_qspi_writel(tqspi, address_value,
   1069					  QSPI_CMB_SEQ_ADDR);
   1070			/* Program Command and Address config in register */
   1071			tegra_qspi_writel(tqspi, cmd_config,
   1072					  QSPI_CMB_SEQ_CMD_CFG);
   1073			tegra_qspi_writel(tqspi, addr_config,
   1074					  QSPI_CMB_SEQ_ADDR_CFG);
   1075
   1076			reinit_completion(&tqspi->xfer_completion);
   1077			cmd1 = tegra_qspi_setup_transfer_one(spi, xfer,
   1078							     is_first_msg);
   1079			ret = tegra_qspi_start_transfer_one(spi, xfer,
   1080							    cmd1);
   1081
   1082			if (ret < 0) {
   1083				dev_err(tqspi->dev, "Failed to start transfer-one: %d\n",
   1084					ret);
   1085				return ret;
   1086			}
   1087
   1088			is_first_msg = false;
   1089			ret = wait_for_completion_timeout
   1090					(&tqspi->xfer_completion,
   1091					QSPI_DMA_TIMEOUT);
   1092
   1093			if (WARN_ON(ret == 0)) {
   1094				dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
   1095					ret);
   1096				if (tqspi->is_curr_dma_xfer &&
   1097				    (tqspi->cur_direction & DATA_DIR_TX))
   1098					dmaengine_terminate_all
   1099						(tqspi->tx_dma_chan);
   1100
   1101				if (tqspi->is_curr_dma_xfer &&
   1102				    (tqspi->cur_direction & DATA_DIR_RX))
   1103					dmaengine_terminate_all
   1104						(tqspi->rx_dma_chan);
   1105
   1106				/* Abort transfer by resetting pio/dma bit */
   1107				if (!tqspi->is_curr_dma_xfer) {
   1108					cmd1 = tegra_qspi_readl
   1109							(tqspi,
   1110							 QSPI_COMMAND1);
   1111					cmd1 &= ~QSPI_PIO;
   1112					tegra_qspi_writel
   1113							(tqspi, cmd1,
   1114							 QSPI_COMMAND1);
   1115				} else {
   1116					dma_ctl = tegra_qspi_readl
   1117							(tqspi,
   1118							 QSPI_DMA_CTL);
   1119					dma_ctl &= ~QSPI_DMA_EN;
   1120					tegra_qspi_writel(tqspi, dma_ctl,
   1121							  QSPI_DMA_CTL);
   1122				}
   1123
   1124				/* Reset controller if timeout happens */
   1125				if (device_reset(tqspi->dev) < 0)
   1126					dev_warn_once(tqspi->dev,
   1127						      "device reset failed\n");
   1128				ret = -EIO;
   1129				goto exit;
   1130			}
   1131
    1132			if (tqspi->tx_status || tqspi->rx_status) {
   1133				dev_err(tqspi->dev, "QSPI Transfer failed\n");
   1134				tqspi->tx_status = 0;
   1135				tqspi->rx_status = 0;
   1136				ret = -EIO;
   1137				goto exit;
   1138			}
   1139			break;
   1140		default:
   1141			ret = -EINVAL;
   1142			goto exit;
   1143		}
   1144		msg->actual_length += xfer->len;
   1145		transfer_phase++;
   1146	}
   1147
   1148exit:
   1149	msg->status = ret;
   1150
   1151	return ret;
   1152}
   1153
   1154static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
   1155					    struct spi_message *msg)
   1156{
   1157	struct spi_device *spi = msg->spi;
   1158	struct spi_transfer *transfer;
   1159	bool is_first_msg = true;
   1160	int ret = 0, val = 0;
   1161
   1162	msg->status = 0;
   1163	msg->actual_length = 0;
   1164	tqspi->tx_status = 0;
   1165	tqspi->rx_status = 0;
   1166
   1167	/* Disable Combined sequence mode */
   1168	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
   1169	val &= ~QSPI_CMB_SEQ_EN;
   1170	tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
   1171	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
   1172		struct spi_transfer *xfer = transfer;
   1173		u8 dummy_bytes = 0;
   1174		u32 cmd1;
   1175
   1176		tqspi->dummy_cycles = 0;
   1177		/*
   1178		 * Tegra QSPI hardware supports dummy bytes transfer after actual transfer
   1179		 * bytes based on programmed dummy clock cycles in the QSPI_MISC register.
   1180		 * So, check if the next transfer is dummy data transfer and program dummy
   1181		 * clock cycles along with the current transfer and skip next transfer.
   1182		 */
   1183		if (!list_is_last(&xfer->transfer_list, &msg->transfers)) {
   1184			struct spi_transfer *next_xfer;
   1185
   1186			next_xfer = list_next_entry(xfer, transfer_list);
   1187			if (next_xfer->dummy_data) {
   1188				u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits;
   1189
   1190				if (dummy_cycles <= QSPI_DUMMY_CYCLES_MAX) {
   1191					tqspi->dummy_cycles = dummy_cycles;
   1192					dummy_bytes = next_xfer->len;
   1193					transfer = next_xfer;
   1194				}
   1195			}
   1196		}
   1197
   1198		reinit_completion(&tqspi->xfer_completion);
   1199
   1200		cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);
   1201
   1202		ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
   1203		if (ret < 0) {
   1204			dev_err(tqspi->dev, "failed to start transfer: %d\n", ret);
   1205			goto complete_xfer;
   1206		}
   1207
   1208		ret = wait_for_completion_timeout(&tqspi->xfer_completion,
   1209						  QSPI_DMA_TIMEOUT);
   1210		if (WARN_ON(ret == 0)) {
   1211			dev_err(tqspi->dev, "transfer timeout\n");
   1212			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
   1213				dmaengine_terminate_all(tqspi->tx_dma_chan);
   1214			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
   1215				dmaengine_terminate_all(tqspi->rx_dma_chan);
   1216			tegra_qspi_handle_error(tqspi);
   1217			ret = -EIO;
   1218			goto complete_xfer;
   1219		}
   1220
    1221		if (tqspi->tx_status || tqspi->rx_status) {
   1222			tegra_qspi_handle_error(tqspi);
   1223			ret = -EIO;
   1224			goto complete_xfer;
   1225		}
   1226
   1227		msg->actual_length += xfer->len + dummy_bytes;
   1228
   1229complete_xfer:
   1230		if (ret < 0) {
   1231			tegra_qspi_transfer_end(spi);
   1232			spi_transfer_delay_exec(xfer);
   1233			goto exit;
   1234		}
   1235
   1236		if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
   1237			/* de-activate CS after last transfer only when cs_change is not set */
   1238			if (!xfer->cs_change) {
   1239				tegra_qspi_transfer_end(spi);
   1240				spi_transfer_delay_exec(xfer);
   1241			}
   1242		} else if (xfer->cs_change) {
    1243			/* deactivate CS between transfers only when cs_change is set */
   1244			tegra_qspi_transfer_end(spi);
   1245			spi_transfer_delay_exec(xfer);
   1246		}
   1247	}
   1248
   1249	ret = 0;
   1250exit:
   1251	msg->status = ret;
   1252
   1253	return ret;
   1254}
   1255
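        /*
         * A message qualifies for combined sequence mode only if the SoC
         * supports it and the message has exactly three transfers: a command
         * of at most 2 bytes, an address of 3 or 4 bytes, and a data transfer
         * of at most 256 bytes (QSPI_FIFO_DEPTH << 2) on a SoC with DMA.
         */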
   1256static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
   1257					struct spi_message *msg)
   1258{
   1259	int transfer_count = 0;
   1260	struct spi_transfer *xfer;
   1261
   1262	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
   1263		transfer_count++;
   1264	}
   1265	if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3)
   1266		return false;
   1267	xfer = list_first_entry(&msg->transfers, typeof(*xfer),
   1268				transfer_list);
   1269	if (xfer->len > 2)
   1270		return false;
   1271	xfer = list_next_entry(xfer, transfer_list);
   1272	if (xfer->len > 4 || xfer->len < 3)
   1273		return false;
   1274	xfer = list_next_entry(xfer, transfer_list);
   1275	if (!tqspi->soc_data->has_dma || xfer->len > (QSPI_FIFO_DEPTH << 2))
   1276		return false;
   1277
   1278	return true;
   1279}
   1280
   1281static int tegra_qspi_transfer_one_message(struct spi_master *master,
   1282					   struct spi_message *msg)
   1283{
   1284	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
   1285	int ret;
   1286
   1287	if (tegra_qspi_validate_cmb_seq(tqspi, msg))
   1288		ret = tegra_qspi_combined_seq_xfer(tqspi, msg);
   1289	else
   1290		ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);
   1291
   1292	spi_finalize_current_message(master);
   1293
   1294	return ret;
   1295}
   1296
   1297static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
   1298{
   1299	struct spi_transfer *t = tqspi->curr_xfer;
   1300	unsigned long flags;
   1301
   1302	spin_lock_irqsave(&tqspi->lock, flags);
   1303
    1304	if (tqspi->tx_status || tqspi->rx_status) {
   1305		tegra_qspi_handle_error(tqspi);
   1306		complete(&tqspi->xfer_completion);
   1307		goto exit;
   1308	}
   1309
   1310	if (tqspi->cur_direction & DATA_DIR_RX)
   1311		tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t);
   1312
   1313	if (tqspi->cur_direction & DATA_DIR_TX)
   1314		tqspi->cur_pos = tqspi->cur_tx_pos;
   1315	else
   1316		tqspi->cur_pos = tqspi->cur_rx_pos;
   1317
   1318	if (tqspi->cur_pos == t->len) {
   1319		complete(&tqspi->xfer_completion);
   1320		goto exit;
   1321	}
   1322
   1323	tegra_qspi_calculate_curr_xfer_param(tqspi, t);
   1324	tegra_qspi_start_cpu_based_transfer(tqspi, t);
   1325exit:
   1326	spin_unlock_irqrestore(&tqspi->lock, flags);
   1327	return IRQ_HANDLED;
   1328}
   1329
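        /*
         * err below is a small direction code rather than an errno: +1 marks
         * a TX DMA failure and +2 an RX DMA failure, so a value of 3 means
         * both directions failed.
         */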
   1330static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
   1331{
   1332	struct spi_transfer *t = tqspi->curr_xfer;
   1333	unsigned int total_fifo_words;
   1334	unsigned long flags;
   1335	long wait_status;
   1336	int err = 0;
   1337
   1338	if (tqspi->cur_direction & DATA_DIR_TX) {
   1339		if (tqspi->tx_status) {
   1340			dmaengine_terminate_all(tqspi->tx_dma_chan);
   1341			err += 1;
   1342		} else {
   1343			wait_status = wait_for_completion_interruptible_timeout(
   1344				&tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
   1345			if (wait_status <= 0) {
   1346				dmaengine_terminate_all(tqspi->tx_dma_chan);
   1347				dev_err(tqspi->dev, "failed TX DMA transfer\n");
   1348				err += 1;
   1349			}
   1350		}
   1351	}
   1352
   1353	if (tqspi->cur_direction & DATA_DIR_RX) {
   1354		if (tqspi->rx_status) {
   1355			dmaengine_terminate_all(tqspi->rx_dma_chan);
   1356			err += 2;
   1357		} else {
   1358			wait_status = wait_for_completion_interruptible_timeout(
   1359				&tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
   1360			if (wait_status <= 0) {
   1361				dmaengine_terminate_all(tqspi->rx_dma_chan);
   1362				dev_err(tqspi->dev, "failed RX DMA transfer\n");
   1363				err += 2;
   1364			}
   1365		}
   1366	}
   1367
   1368	spin_lock_irqsave(&tqspi->lock, flags);
   1369
   1370	if (err) {
   1371		tegra_qspi_dma_unmap_xfer(tqspi, t);
   1372		tegra_qspi_handle_error(tqspi);
   1373		complete(&tqspi->xfer_completion);
   1374		goto exit;
   1375	}
   1376
   1377	if (tqspi->cur_direction & DATA_DIR_RX)
   1378		tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t);
   1379
   1380	if (tqspi->cur_direction & DATA_DIR_TX)
   1381		tqspi->cur_pos = tqspi->cur_tx_pos;
   1382	else
   1383		tqspi->cur_pos = tqspi->cur_rx_pos;
   1384
   1385	if (tqspi->cur_pos == t->len) {
   1386		tegra_qspi_dma_unmap_xfer(tqspi, t);
   1387		complete(&tqspi->xfer_completion);
   1388		goto exit;
   1389	}
   1390
   1391	tegra_qspi_dma_unmap_xfer(tqspi, t);
   1392
   1393	/* continue transfer in current message */
   1394	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
   1395	if (total_fifo_words > QSPI_FIFO_DEPTH)
   1396		err = tegra_qspi_start_dma_based_transfer(tqspi, t);
   1397	else
   1398		err = tegra_qspi_start_cpu_based_transfer(tqspi, t);
   1399
   1400exit:
   1401	spin_unlock_irqrestore(&tqspi->lock, flags);
   1402	return IRQ_HANDLED;
   1403}
   1404
   1405static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
   1406{
   1407	struct tegra_qspi *tqspi = context_data;
   1408
   1409	tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
   1410
   1411	if (tqspi->cur_direction & DATA_DIR_TX)
   1412		tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF);
   1413
   1414	if (tqspi->cur_direction & DATA_DIR_RX)
   1415		tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
   1416
   1417	tegra_qspi_mask_clear_irq(tqspi);
   1418
   1419	if (!tqspi->is_curr_dma_xfer)
   1420		return handle_cpu_based_xfer(tqspi);
   1421
   1422	return handle_dma_based_xfer(tqspi);
   1423}
   1424
   1425static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
   1426	.has_dma = true,
   1427	.cmb_xfer_capable = false,
   1428};
   1429
   1430static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
   1431	.has_dma = true,
   1432	.cmb_xfer_capable = true,
   1433};
   1434
   1435static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
   1436	.has_dma = false,
   1437	.cmb_xfer_capable = true,
   1438};
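        /*
         * Note: with has_dma = false, tegra_qspi_validate_cmb_seq() always
         * rejects the message, so combined sequence mode is effectively
         * unused on Tegra234 in this version of the driver.
         */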
   1439
   1440static const struct of_device_id tegra_qspi_of_match[] = {
   1441	{
   1442		.compatible = "nvidia,tegra210-qspi",
   1443		.data	    = &tegra210_qspi_soc_data,
   1444	}, {
   1445		.compatible = "nvidia,tegra186-qspi",
   1446		.data	    = &tegra186_qspi_soc_data,
   1447	}, {
   1448		.compatible = "nvidia,tegra194-qspi",
   1449		.data	    = &tegra186_qspi_soc_data,
   1450	}, {
   1451		.compatible = "nvidia,tegra234-qspi",
   1452		.data	    = &tegra234_qspi_soc_data,
   1453	},
   1454	{}
   1455};
   1456
   1457MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);
   1458
   1459#ifdef CONFIG_ACPI
   1460static const struct acpi_device_id tegra_qspi_acpi_match[] = {
   1461	{
   1462		.id = "NVDA1213",
   1463		.driver_data = (kernel_ulong_t)&tegra210_qspi_soc_data,
   1464	}, {
   1465		.id = "NVDA1313",
   1466		.driver_data = (kernel_ulong_t)&tegra186_qspi_soc_data,
   1467	}, {
   1468		.id = "NVDA1413",
   1469		.driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data,
   1470	},
   1471	{}
   1472};
   1473
   1474MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
   1475#endif
   1476
   1477static int tegra_qspi_probe(struct platform_device *pdev)
   1478{
   1479	struct spi_master	*master;
   1480	struct tegra_qspi	*tqspi;
   1481	struct resource		*r;
   1482	int ret, qspi_irq;
   1483	int bus_num;
   1484
   1485	master = devm_spi_alloc_master(&pdev->dev, sizeof(*tqspi));
   1486	if (!master)
   1487		return -ENOMEM;
   1488
   1489	platform_set_drvdata(pdev, master);
   1490	tqspi = spi_master_get_devdata(master);
   1491
   1492	master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
   1493			    SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
   1494	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
   1495	master->setup = tegra_qspi_setup;
   1496	master->transfer_one_message = tegra_qspi_transfer_one_message;
   1497	master->num_chipselect = 1;
   1498	master->auto_runtime_pm = true;
   1499
   1500	bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
   1501	if (bus_num >= 0)
   1502		master->bus_num = bus_num;
   1503
   1504	tqspi->master = master;
   1505	tqspi->dev = &pdev->dev;
   1506	spin_lock_init(&tqspi->lock);
   1507
   1508	tqspi->soc_data = device_get_match_data(&pdev->dev);
   1509	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   1510	tqspi->base = devm_ioremap_resource(&pdev->dev, r);
   1511	if (IS_ERR(tqspi->base))
   1512		return PTR_ERR(tqspi->base);
   1513
   1514	tqspi->phys = r->start;
   1515	qspi_irq = platform_get_irq(pdev, 0);
   1516	if (qspi_irq < 0)
   1517		return qspi_irq;
   1518	tqspi->irq = qspi_irq;
   1519
   1520	if (!has_acpi_companion(tqspi->dev)) {
   1521		tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
   1522		if (IS_ERR(tqspi->clk)) {
   1523			ret = PTR_ERR(tqspi->clk);
   1524			dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
   1525			return ret;
   1526		}
   1527
   1528	}
   1529
   1530	tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
   1531	tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN;
   1532
   1533	ret = tegra_qspi_init_dma(tqspi);
   1534	if (ret < 0)
   1535		return ret;
   1536
   1537	if (tqspi->use_dma)
   1538		tqspi->max_buf_size = tqspi->dma_buf_size;
   1539
   1540	init_completion(&tqspi->tx_dma_complete);
   1541	init_completion(&tqspi->rx_dma_complete);
   1542	init_completion(&tqspi->xfer_completion);
   1543
   1544	pm_runtime_enable(&pdev->dev);
   1545	ret = pm_runtime_resume_and_get(&pdev->dev);
   1546	if (ret < 0) {
   1547		dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret);
   1548		goto exit_pm_disable;
   1549	}
   1550
   1551	if (device_reset(tqspi->dev) < 0)
   1552		dev_warn_once(tqspi->dev, "device reset failed\n");
   1553
    1554	tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL;
   1555	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
   1556	tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1);
   1557	tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2);
   1558	tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2);
   1559
   1560	pm_runtime_put(&pdev->dev);
   1561
   1562	ret = request_threaded_irq(tqspi->irq, NULL,
   1563				   tegra_qspi_isr_thread, IRQF_ONESHOT,
   1564				   dev_name(&pdev->dev), tqspi);
   1565	if (ret < 0) {
   1566		dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret);
   1567		goto exit_pm_disable;
   1568	}
   1569
   1570	master->dev.of_node = pdev->dev.of_node;
   1571	ret = spi_register_master(master);
   1572	if (ret < 0) {
   1573		dev_err(&pdev->dev, "failed to register master: %d\n", ret);
   1574		goto exit_free_irq;
   1575	}
   1576
   1577	return 0;
   1578
   1579exit_free_irq:
   1580	free_irq(qspi_irq, tqspi);
   1581exit_pm_disable:
   1582	pm_runtime_force_suspend(&pdev->dev);
   1583	tegra_qspi_deinit_dma(tqspi);
   1584	return ret;
   1585}
   1586
   1587static int tegra_qspi_remove(struct platform_device *pdev)
   1588{
   1589	struct spi_master *master = platform_get_drvdata(pdev);
   1590	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
   1591
   1592	spi_unregister_master(master);
   1593	free_irq(tqspi->irq, tqspi);
   1594	pm_runtime_force_suspend(&pdev->dev);
   1595	tegra_qspi_deinit_dma(tqspi);
   1596
   1597	return 0;
   1598}
   1599
   1600static int __maybe_unused tegra_qspi_suspend(struct device *dev)
   1601{
   1602	struct spi_master *master = dev_get_drvdata(dev);
   1603
   1604	return spi_master_suspend(master);
   1605}
   1606
   1607static int __maybe_unused tegra_qspi_resume(struct device *dev)
   1608{
   1609	struct spi_master *master = dev_get_drvdata(dev);
   1610	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
   1611	int ret;
   1612
   1613	ret = pm_runtime_resume_and_get(dev);
   1614	if (ret < 0) {
   1615		dev_err(dev, "failed to get runtime PM: %d\n", ret);
   1616		return ret;
   1617	}
   1618
   1619	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
   1620	tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
   1621	pm_runtime_put(dev);
   1622
   1623	return spi_master_resume(master);
   1624}
   1625
   1626static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
   1627{
   1628	struct spi_master *master = dev_get_drvdata(dev);
   1629	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
   1630
   1631	/* Runtime pm disabled with ACPI */
   1632	if (has_acpi_companion(tqspi->dev))
   1633		return 0;
    1634	/* flush any writes pending in the PPSB queue by reading back */
   1635	tegra_qspi_readl(tqspi, QSPI_COMMAND1);
   1636
   1637	clk_disable_unprepare(tqspi->clk);
   1638
   1639	return 0;
   1640}
   1641
   1642static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
   1643{
   1644	struct spi_master *master = dev_get_drvdata(dev);
   1645	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
   1646	int ret;
   1647
   1648	/* Runtime pm disabled with ACPI */
   1649	if (has_acpi_companion(tqspi->dev))
   1650		return 0;
   1651	ret = clk_prepare_enable(tqspi->clk);
   1652	if (ret < 0)
   1653		dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);
   1654
   1655	return ret;
   1656}
   1657
   1658static const struct dev_pm_ops tegra_qspi_pm_ops = {
   1659	SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL)
   1660	SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume)
   1661};
   1662
   1663static struct platform_driver tegra_qspi_driver = {
   1664	.driver = {
   1665		.name		= "tegra-qspi",
   1666		.pm		= &tegra_qspi_pm_ops,
   1667		.of_match_table	= tegra_qspi_of_match,
   1668		.acpi_match_table = ACPI_PTR(tegra_qspi_acpi_match),
   1669	},
   1670	.probe =	tegra_qspi_probe,
   1671	.remove =	tegra_qspi_remove,
   1672};
   1673module_platform_driver(tegra_qspi_driver);
   1674
   1675MODULE_ALIAS("platform:qspi-tegra");
   1676MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");
   1677MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
   1678MODULE_LICENSE("GPL v2");