cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

spi-imx.c (51109B)


      1// SPDX-License-Identifier: GPL-2.0+
      2// Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
      3// Copyright (C) 2008 Juergen Beisert
      4
      5#include <linux/clk.h>
      6#include <linux/completion.h>
      7#include <linux/delay.h>
      8#include <linux/dmaengine.h>
      9#include <linux/dma-mapping.h>
     10#include <linux/err.h>
     11#include <linux/interrupt.h>
     12#include <linux/io.h>
     13#include <linux/irq.h>
     14#include <linux/kernel.h>
     15#include <linux/module.h>
     16#include <linux/pinctrl/consumer.h>
     17#include <linux/platform_device.h>
     18#include <linux/pm_runtime.h>
     19#include <linux/slab.h>
     20#include <linux/spi/spi.h>
     21#include <linux/types.h>
     22#include <linux/of.h>
     23#include <linux/of_device.h>
     24#include <linux/property.h>
     25
     26#include <linux/dma/imx-dma.h>
     27
     28#define DRIVER_NAME "spi_imx"
     29
     30static bool use_dma = true;
     31module_param(use_dma, bool, 0644);
     32MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
     33
     34/* define polling limits */
     35static unsigned int polling_limit_us = 30;
     36module_param(polling_limit_us, uint, 0664);
     37MODULE_PARM_DESC(polling_limit_us,
     38		 "time in us to run a transfer in polling mode\n");
     39
     40#define MXC_RPM_TIMEOUT		2000 /* 2000ms */
     41
     42#define MXC_CSPIRXDATA		0x00
     43#define MXC_CSPITXDATA		0x04
     44#define MXC_CSPICTRL		0x08
     45#define MXC_CSPIINT		0x0c
     46#define MXC_RESET		0x1c
     47
     48/* generic defines to abstract from the different register layouts */
     49#define MXC_INT_RR	(1 << 0) /* Receive data ready interrupt */
     50#define MXC_INT_TE	(1 << 1) /* Transmit FIFO empty interrupt */
     51#define MXC_INT_RDR	BIT(4) /* Receive data threshold interrupt */
     52
     53/* The maximum number of bytes that an SDMA BD can transfer. */
     54#define MAX_SDMA_BD_BYTES (1 << 15)
     55#define MX51_ECSPI_CTRL_MAX_BURST	512
     56/* The maximum number of bytes that IMX53_ECSPI can transfer in slave mode. */
     57#define MX53_MAX_TRANSFER_BYTES		512
     58
     59enum spi_imx_devtype {
     60	IMX1_CSPI,
     61	IMX21_CSPI,
     62	IMX27_CSPI,
     63	IMX31_CSPI,
     64	IMX35_CSPI,	/* CSPI on all i.mx except above */
     65	IMX51_ECSPI,	/* ECSPI on i.mx51 */
     66	IMX53_ECSPI,	/* ECSPI on i.mx53 and later */
     67};
     68
     69struct spi_imx_data;
     70
     71struct spi_imx_devtype_data {
     72	void (*intctrl)(struct spi_imx_data *spi_imx, int enable);
     73	int (*prepare_message)(struct spi_imx_data *spi_imx, struct spi_message *msg);
     74	int (*prepare_transfer)(struct spi_imx_data *spi_imx, struct spi_device *spi);
     75	void (*trigger)(struct spi_imx_data *spi_imx);
     76	int (*rx_available)(struct spi_imx_data *spi_imx);
     77	void (*reset)(struct spi_imx_data *spi_imx);
     78	void (*setup_wml)(struct spi_imx_data *spi_imx);
     79	void (*disable)(struct spi_imx_data *spi_imx);
     80	void (*disable_dma)(struct spi_imx_data *spi_imx);
     81	bool has_dmamode;
     82	bool has_slavemode;
     83	unsigned int fifo_size;
     84	bool dynamic_burst;
     85	/*
     86	 * ERR009165 fixed or not:
     87	 * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
     88	 */
     89	bool tx_glitch_fixed;
     90	enum spi_imx_devtype devtype;
     91};
     92
     93struct spi_imx_data {
     94	struct spi_controller *controller;
     95	struct device *dev;
     96
     97	struct completion xfer_done;
     98	void __iomem *base;
     99	unsigned long base_phys;
    100
    101	struct clk *clk_per;
    102	struct clk *clk_ipg;
    103	unsigned long spi_clk;
    104	unsigned int spi_bus_clk;
    105
    106	unsigned int bits_per_word;
    107	unsigned int spi_drctl;
    108
    109	unsigned int count, remainder;
    110	void (*tx)(struct spi_imx_data *spi_imx);
    111	void (*rx)(struct spi_imx_data *spi_imx);
    112	void *rx_buf;
    113	const void *tx_buf;
    114	unsigned int txfifo; /* number of words pushed in tx FIFO */
    115	unsigned int dynamic_burst;
    116	bool rx_only;
    117
    118	/* Slave mode */
    119	bool slave_mode;
    120	bool slave_aborted;
    121	unsigned int slave_burst;
    122
    123	/* DMA */
    124	bool usedma;
    125	u32 wml;
    126	struct completion dma_rx_completion;
    127	struct completion dma_tx_completion;
    128
    129	const struct spi_imx_devtype_data *devtype_data;
    130};
    131
    132static inline int is_imx27_cspi(struct spi_imx_data *d)
    133{
    134	return d->devtype_data->devtype == IMX27_CSPI;
    135}
    136
    137static inline int is_imx35_cspi(struct spi_imx_data *d)
    138{
    139	return d->devtype_data->devtype == IMX35_CSPI;
    140}
    141
    142static inline int is_imx51_ecspi(struct spi_imx_data *d)
    143{
    144	return d->devtype_data->devtype == IMX51_ECSPI;
    145}
    146
    147static inline int is_imx53_ecspi(struct spi_imx_data *d)
    148{
    149	return d->devtype_data->devtype == IMX53_ECSPI;
    150}
    151
    152#define MXC_SPI_BUF_RX(type)						\
    153static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)		\
    154{									\
    155	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
    156									\
    157	if (spi_imx->rx_buf) {						\
    158		*(type *)spi_imx->rx_buf = val;				\
    159		spi_imx->rx_buf += sizeof(type);			\
    160	}								\
    161									\
    162	spi_imx->remainder -= sizeof(type);				\
    163}
    164
    165#define MXC_SPI_BUF_TX(type)						\
    166static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)		\
    167{									\
    168	type val = 0;							\
    169									\
    170	if (spi_imx->tx_buf) {						\
    171		val = *(type *)spi_imx->tx_buf;				\
    172		spi_imx->tx_buf += sizeof(type);			\
    173	}								\
    174									\
    175	spi_imx->count -= sizeof(type);					\
    176									\
    177	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
    178}
    179
    180MXC_SPI_BUF_RX(u8)
    181MXC_SPI_BUF_TX(u8)
    182MXC_SPI_BUF_RX(u16)
    183MXC_SPI_BUF_TX(u16)
    184MXC_SPI_BUF_RX(u32)
    185MXC_SPI_BUF_TX(u32)
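
       /*
        * For reference, the spi_imx_buf_rx_u16() instantiation above expands
        * to roughly the following (illustrative sketch shown as a comment,
        * not a second definition):
        *
        *	static void spi_imx_buf_rx_u16(struct spi_imx_data *spi_imx)
        *	{
        *		unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);
        *
        *		if (spi_imx->rx_buf) {
        *			*(u16 *)spi_imx->rx_buf = val;
        *			spi_imx->rx_buf += sizeof(u16);
        *		}
        *
        *		spi_imx->remainder -= sizeof(u16);
        *	}
        */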
    186
    187/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
    188 * (which is currently not the case in this driver)
    189 */
    190static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
    191	256, 384, 512, 768, 1024};
    192
    193/* MX21, MX27 */
    194static unsigned int spi_imx_clkdiv_1(unsigned int fin,
    195		unsigned int fspi, unsigned int max, unsigned int *fres)
    196{
    197	int i;
    198
    199	for (i = 2; i < max; i++)
    200		if (fspi * mxc_clkdivs[i] >= fin)
    201			break;
    202
    203	*fres = fin / mxc_clkdivs[i];
    204	return i;
    205}
    206
    207/* MX1, MX31, MX35, MX51 CSPI */
    208static unsigned int spi_imx_clkdiv_2(unsigned int fin,
    209		unsigned int fspi, unsigned int *fres)
    210{
    211	int i, div = 4;
    212
    213	for (i = 0; i < 7; i++) {
    214		if (fspi * div >= fin)
    215			goto out;
    216		div <<= 1;
    217	}
    218
    219out:
    220	*fres = fin / div;
    221	return i;
    222}
    223
    224static int spi_imx_bytes_per_word(const int bits_per_word)
    225{
    226	if (bits_per_word <= 8)
    227		return 1;
    228	else if (bits_per_word <= 16)
    229		return 2;
    230	else
    231		return 4;
    232}
    233
    234static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device *spi,
    235			 struct spi_transfer *transfer)
    236{
    237	struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
    238
    239	if (!use_dma || controller->fallback)
    240		return false;
    241
    242	if (!controller->dma_rx)
    243		return false;
    244
    245	if (spi_imx->slave_mode)
    246		return false;
    247
    248	if (transfer->len < spi_imx->devtype_data->fifo_size)
    249		return false;
    250
    251	spi_imx->dynamic_burst = 0;
    252
    253	return true;
    254}
    255
    256#define MX51_ECSPI_CTRL		0x08
    257#define MX51_ECSPI_CTRL_ENABLE		(1 <<  0)
    258#define MX51_ECSPI_CTRL_XCH		(1 <<  2)
    259#define MX51_ECSPI_CTRL_SMC		(1 << 3)
    260#define MX51_ECSPI_CTRL_MODE_MASK	(0xf << 4)
    261#define MX51_ECSPI_CTRL_DRCTL(drctl)	((drctl) << 16)
    262#define MX51_ECSPI_CTRL_POSTDIV_OFFSET	8
    263#define MX51_ECSPI_CTRL_PREDIV_OFFSET	12
    264#define MX51_ECSPI_CTRL_CS(cs)		((cs) << 18)
    265#define MX51_ECSPI_CTRL_BL_OFFSET	20
    266#define MX51_ECSPI_CTRL_BL_MASK		(0xfff << 20)
    267
    268#define MX51_ECSPI_CONFIG	0x0c
    269#define MX51_ECSPI_CONFIG_SCLKPHA(cs)	(1 << ((cs) +  0))
    270#define MX51_ECSPI_CONFIG_SCLKPOL(cs)	(1 << ((cs) +  4))
    271#define MX51_ECSPI_CONFIG_SBBCTRL(cs)	(1 << ((cs) +  8))
    272#define MX51_ECSPI_CONFIG_SSBPOL(cs)	(1 << ((cs) + 12))
    273#define MX51_ECSPI_CONFIG_SCLKCTL(cs)	(1 << ((cs) + 20))
    274
    275#define MX51_ECSPI_INT		0x10
    276#define MX51_ECSPI_INT_TEEN		(1 <<  0)
    277#define MX51_ECSPI_INT_RREN		(1 <<  3)
    278#define MX51_ECSPI_INT_RDREN		(1 <<  4)
    279
    280#define MX51_ECSPI_DMA		0x14
    281#define MX51_ECSPI_DMA_TX_WML(wml)	((wml) & 0x3f)
    282#define MX51_ECSPI_DMA_RX_WML(wml)	(((wml) & 0x3f) << 16)
    283#define MX51_ECSPI_DMA_RXT_WML(wml)	(((wml) & 0x3f) << 24)
    284
    285#define MX51_ECSPI_DMA_TEDEN		(1 << 7)
    286#define MX51_ECSPI_DMA_RXDEN		(1 << 23)
    287#define MX51_ECSPI_DMA_RXTDEN		(1 << 31)
    288
    289#define MX51_ECSPI_STAT		0x18
    290#define MX51_ECSPI_STAT_RR		(1 <<  3)
    291
    292#define MX51_ECSPI_TESTREG	0x20
    293#define MX51_ECSPI_TESTREG_LBC	BIT(31)
    294
    295static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
    296{
    297	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);
    298
    299	if (spi_imx->rx_buf) {
    300#ifdef __LITTLE_ENDIAN
    301		unsigned int bytes_per_word;
    302
    303		bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
    304		if (bytes_per_word == 1)
    305			swab32s(&val);
    306		else if (bytes_per_word == 2)
    307			swahw32s(&val);
    308#endif
    309		*(u32 *)spi_imx->rx_buf = val;
    310		spi_imx->rx_buf += sizeof(u32);
    311	}
    312
    313	spi_imx->remainder -= sizeof(u32);
    314}
    315
    316static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
    317{
    318	int unaligned;
    319	u32 val;
    320
    321	unaligned = spi_imx->remainder % 4;
    322
    323	if (!unaligned) {
    324		spi_imx_buf_rx_swap_u32(spi_imx);
    325		return;
    326	}
    327
    328	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
    329		spi_imx_buf_rx_u16(spi_imx);
    330		return;
    331	}
    332
    333	val = readl(spi_imx->base + MXC_CSPIRXDATA);
    334
    335	while (unaligned--) {
    336		if (spi_imx->rx_buf) {
    337			*(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
    338			spi_imx->rx_buf++;
    339		}
    340		spi_imx->remainder--;
    341	}
    342}
    343
    344static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
    345{
    346	u32 val = 0;
    347#ifdef __LITTLE_ENDIAN
    348	unsigned int bytes_per_word;
    349#endif
    350
    351	if (spi_imx->tx_buf) {
    352		val = *(u32 *)spi_imx->tx_buf;
    353		spi_imx->tx_buf += sizeof(u32);
    354	}
    355
    356	spi_imx->count -= sizeof(u32);
    357#ifdef __LITTLE_ENDIAN
    358	bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
    359
    360	if (bytes_per_word == 1)
    361		swab32s(&val);
    362	else if (bytes_per_word == 2)
    363		swahw32s(&val);
    364#endif
    365	writel(val, spi_imx->base + MXC_CSPITXDATA);
    366}
    367
    368static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
    369{
    370	int unaligned;
    371	u32 val = 0;
    372
    373	unaligned = spi_imx->count % 4;
    374
    375	if (!unaligned) {
    376		spi_imx_buf_tx_swap_u32(spi_imx);
    377		return;
    378	}
    379
    380	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
    381		spi_imx_buf_tx_u16(spi_imx);
    382		return;
    383	}
    384
    385	while (unaligned--) {
    386		if (spi_imx->tx_buf) {
    387			val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
    388			spi_imx->tx_buf++;
    389		}
    390		spi_imx->count--;
    391	}
    392
    393	writel(val, spi_imx->base + MXC_CSPITXDATA);
    394}
    395
    396static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
    397{
    398	u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));
    399
    400	if (spi_imx->rx_buf) {
    401		int n_bytes = spi_imx->slave_burst % sizeof(val);
    402
    403		if (!n_bytes)
    404			n_bytes = sizeof(val);
    405
    406		memcpy(spi_imx->rx_buf,
    407		       ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);
    408
    409		spi_imx->rx_buf += n_bytes;
    410		spi_imx->slave_burst -= n_bytes;
    411	}
    412
    413	spi_imx->remainder -= sizeof(u32);
    414}
    415
    416static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
    417{
    418	u32 val = 0;
    419	int n_bytes = spi_imx->count % sizeof(val);
    420
    421	if (!n_bytes)
    422		n_bytes = sizeof(val);
    423
    424	if (spi_imx->tx_buf) {
    425		memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
    426		       spi_imx->tx_buf, n_bytes);
    427		val = cpu_to_be32(val);
    428		spi_imx->tx_buf += n_bytes;
    429	}
    430
    431	spi_imx->count -= n_bytes;
    432
    433	writel(val, spi_imx->base + MXC_CSPITXDATA);
    434}
    435
    436/* MX51 eCSPI */
    437static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
    438				      unsigned int fspi, unsigned int *fres)
    439{
    440	/*
    441	 * there are two 4-bit dividers, the pre-divider divides by
    442	 * $pre, the post-divider by 2^$post
    443	 */
    444	unsigned int pre, post;
    445	unsigned int fin = spi_imx->spi_clk;
    446
    447	if (unlikely(fspi > fin))
    448		return 0;
    449
    450	post = fls(fin) - fls(fspi);
    451	if (fin > fspi << post)
    452		post++;
    453
    454	/* now we have: (fin <= fspi << post) with post being minimal */
    455
    456	post = max(4U, post) - 4;
    457	if (unlikely(post > 0xf)) {
    458		dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
    459				fspi, fin);
    460		return 0xff;
    461	}
    462
    463	pre = DIV_ROUND_UP(fin, fspi << post) - 1;
    464
    465	dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
    466			__func__, fin, fspi, post, pre);
    467
    468	/* Resulting frequency for the SCLK line. */
    469	*fres = (fin / (pre + 1)) >> post;
    470
    471	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
    472		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
    473}
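
       /*
        * Worked example (illustrative): with fin = 60 MHz and fspi = 20 MHz,
        * fls(fin) - fls(fspi) = 26 - 25 = 1; since 60 MHz > (20 MHz << 1),
        * post is bumped to 2 (20 MHz << 2 = 80 MHz >= fin). Then
        * post = max(4, 2) - 4 = 0 and pre = DIV_ROUND_UP(60 MHz, 20 MHz) - 1 = 2,
        * i.e. the pre-divider divides by 3 and the post-divider by 2^0 = 1,
        * giving *fres = 20 MHz exactly.
        */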
    474
    475static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
    476{
    477	unsigned int val = 0;
    478
    479	if (enable & MXC_INT_TE)
    480		val |= MX51_ECSPI_INT_TEEN;
    481
    482	if (enable & MXC_INT_RR)
    483		val |= MX51_ECSPI_INT_RREN;
    484
    485	if (enable & MXC_INT_RDR)
    486		val |= MX51_ECSPI_INT_RDREN;
    487
    488	writel(val, spi_imx->base + MX51_ECSPI_INT);
    489}
    490
    491static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
    492{
    493	u32 reg;
    494
    495	reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
    496	reg |= MX51_ECSPI_CTRL_XCH;
    497	writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
    498}
    499
    500static void mx51_disable_dma(struct spi_imx_data *spi_imx)
    501{
    502	writel(0, spi_imx->base + MX51_ECSPI_DMA);
    503}
    504
    505static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
    506{
    507	u32 ctrl;
    508
    509	ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
    510	ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
    511	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
    512}
    513
    514static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
    515				      struct spi_message *msg)
    516{
    517	struct spi_device *spi = msg->spi;
    518	struct spi_transfer *xfer;
    519	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
    520	u32 min_speed_hz = ~0U;
    521	u32 testreg, delay;
    522	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
    523	u32 current_cfg = cfg;
    524
    525	/* set Master or Slave mode */
    526	if (spi_imx->slave_mode)
    527		ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
    528	else
    529		ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
    530
    531	/*
    532	 * Enable SPI_RDY handling (falling edge/level triggered).
    533	 */
    534	if (spi->mode & SPI_READY)
    535		ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);
    536
    537	/* set chip select to use */
    538	ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
    539
    540	/*
    541	 * The ctrl register must be written first, with the EN bit set; until
    542	 * then, the other registers must not be written to.
    543	 */
    544	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
    545
    546	testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
    547	if (spi->mode & SPI_LOOP)
    548		testreg |= MX51_ECSPI_TESTREG_LBC;
    549	else
    550		testreg &= ~MX51_ECSPI_TESTREG_LBC;
    551	writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);
    552
    553	/*
    554	 * eCSPI burst completion by chip select signal in slave mode is not
    555	 * functional on the i.MX53 SoC, so configure the SPI burst to complete
    556	 * when BURST_LENGTH + 1 bits are received
    557	 */
    558	if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
    559		cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
    560	else
    561		cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
    562
    563	if (spi->mode & SPI_CPOL) {
    564		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
    565		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
    566	} else {
    567		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
    568		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
    569	}
    570
    571	if (spi->mode & SPI_CS_HIGH)
    572		cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
    573	else
    574		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
    575
    576	if (cfg == current_cfg)
    577		return 0;
    578
    579	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
    580
    581	/*
    582	 * Wait until the changes in the configuration register CONFIGREG
    583	 * propagate into the hardware. It takes exactly one tick of the
    584	 * SCLK clock, but we will wait two SCLK ticks just to be sure. The
    585	 * delay it takes for the hardware to apply the changes is noticeable
    586	 * if the SCLK clock runs very slowly. In such a case, if
    587	 * the polarity of SCLK should be inverted, the GPIO ChipSelect might
    588	 * be asserted before the SCLK polarity changes, which would disrupt
    589	 * the SPI communication as the device on the other end would consider
    590	 * the change of SCLK polarity as a clock tick already.
    591	 *
    592	 * Because spi_imx->spi_bus_clk is only set in prepare_message
    593	 * callback, iterate over all the transfers in spi_message, find the
    594	 * one with lowest bus frequency, and use that bus frequency for the
    595	 * delay calculation. In case all transfers have speed_hz == 0, then
    596	 * min_speed_hz is ~0 and the resulting delay is zero.
    597	 */
    598	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
    599		if (!xfer->speed_hz)
    600			continue;
    601		min_speed_hz = min(xfer->speed_hz, min_speed_hz);
    602	}
    603
    604	delay = (2 * 1000000) / min_speed_hz;
    605	if (likely(delay < 10))	/* SCLK is faster than 200 kHz */
    606		udelay(delay);
    607	else			/* SCLK is _very_ slow */
    608		usleep_range(delay, delay + 10);
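
       	/*
       	 * Example (illustrative): if the slowest transfer runs at 1 MHz the
       	 * delay is 2000000 / 1000000 = 2 us and udelay() is used; at 100 kHz
       	 * it is 20 us and usleep_range(20, 30) is used instead.
       	 */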
    609
    610	return 0;
    611}
    612
    613static void mx51_configure_cpha(struct spi_imx_data *spi_imx,
    614				struct spi_device *spi)
    615{
    616	bool cpha = (spi->mode & SPI_CPHA);
    617	bool flip_cpha = (spi->mode & SPI_RX_CPHA_FLIP) && spi_imx->rx_only;
    618	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
    619
    620	/* Flip cpha logical value iff flip_cpha */
    621	cpha ^= flip_cpha;
    622
    623	if (cpha)
    624		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
    625	else
    626		cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
    627
    628	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
    629}
    630
    631static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
    632				       struct spi_device *spi)
    633{
    634	u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
    635	u32 clk;
    636
    637	/* Clear BL field and set the right value */
    638	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
    639	if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
    640		ctrl |= (spi_imx->slave_burst * 8 - 1)
    641			<< MX51_ECSPI_CTRL_BL_OFFSET;
    642	else
    643		ctrl |= (spi_imx->bits_per_word - 1)
    644			<< MX51_ECSPI_CTRL_BL_OFFSET;
    645
    646	/* set clock speed */
    647	ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
    648		  0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
    649	ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
    650	spi_imx->spi_bus_clk = clk;
    651
    652	mx51_configure_cpha(spi_imx, spi);
    653
    654	/*
    655	 * ERR009165: work in XCH mode instead of SMC as PIO on the chips
    656	 * before i.mx6ul.
    657	 */
    658	if (spi_imx->usedma && spi_imx->devtype_data->tx_glitch_fixed)
    659		ctrl |= MX51_ECSPI_CTRL_SMC;
    660	else
    661		ctrl &= ~MX51_ECSPI_CTRL_SMC;
    662
    663	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
    664
    665	return 0;
    666}
    667
    668static void mx51_setup_wml(struct spi_imx_data *spi_imx)
    669{
    670	u32 tx_wml = 0;
    671
    672	if (spi_imx->devtype_data->tx_glitch_fixed)
    673		tx_wml = spi_imx->wml;
    674	/*
    675	 * Configure the DMA register: setup the watermark
    676	 * and enable DMA request.
    677	 */
    678	writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
    679		MX51_ECSPI_DMA_TX_WML(tx_wml) |
    680		MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
    681		MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
    682		MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
    683}
    684
    685static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
    686{
    687	return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
    688}
    689
    690static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
    691{
    692	/* drain receive buffer */
    693	while (mx51_ecspi_rx_available(spi_imx))
    694		readl(spi_imx->base + MXC_CSPIRXDATA);
    695}
    696
    697#define MX31_INTREG_TEEN	(1 << 0)
    698#define MX31_INTREG_RREN	(1 << 3)
    699
    700#define MX31_CSPICTRL_ENABLE	(1 << 0)
    701#define MX31_CSPICTRL_MASTER	(1 << 1)
    702#define MX31_CSPICTRL_XCH	(1 << 2)
    703#define MX31_CSPICTRL_SMC	(1 << 3)
    704#define MX31_CSPICTRL_POL	(1 << 4)
    705#define MX31_CSPICTRL_PHA	(1 << 5)
    706#define MX31_CSPICTRL_SSCTL	(1 << 6)
    707#define MX31_CSPICTRL_SSPOL	(1 << 7)
    708#define MX31_CSPICTRL_BC_SHIFT	8
    709#define MX35_CSPICTRL_BL_SHIFT	20
    710#define MX31_CSPICTRL_CS_SHIFT	24
    711#define MX35_CSPICTRL_CS_SHIFT	12
    712#define MX31_CSPICTRL_DR_SHIFT	16
    713
    714#define MX31_CSPI_DMAREG	0x10
    715#define MX31_DMAREG_RH_DEN	(1<<4)
    716#define MX31_DMAREG_TH_DEN	(1<<1)
    717
    718#define MX31_CSPISTATUS		0x14
    719#define MX31_STATUS_RR		(1 << 3)
    720
    721#define MX31_CSPI_TESTREG	0x1C
    722#define MX31_TEST_LBC		(1 << 14)
    723
    724/* These functions also work for the i.MX35, but be aware that
    725 * the i.MX35 has a slightly different register layout for bits
    726 * we do not use here.
    727 */
    728static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
    729{
    730	unsigned int val = 0;
    731
    732	if (enable & MXC_INT_TE)
    733		val |= MX31_INTREG_TEEN;
    734	if (enable & MXC_INT_RR)
    735		val |= MX31_INTREG_RREN;
    736
    737	writel(val, spi_imx->base + MXC_CSPIINT);
    738}
    739
    740static void mx31_trigger(struct spi_imx_data *spi_imx)
    741{
    742	unsigned int reg;
    743
    744	reg = readl(spi_imx->base + MXC_CSPICTRL);
    745	reg |= MX31_CSPICTRL_XCH;
    746	writel(reg, spi_imx->base + MXC_CSPICTRL);
    747}
    748
    749static int mx31_prepare_message(struct spi_imx_data *spi_imx,
    750				struct spi_message *msg)
    751{
    752	return 0;
    753}
    754
    755static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
    756				 struct spi_device *spi)
    757{
    758	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
    759	unsigned int clk;
    760
    761	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
    762		MX31_CSPICTRL_DR_SHIFT;
    763	spi_imx->spi_bus_clk = clk;
    764
    765	if (is_imx35_cspi(spi_imx)) {
    766		reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
    767		reg |= MX31_CSPICTRL_SSCTL;
    768	} else {
    769		reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
    770	}
    771
    772	if (spi->mode & SPI_CPHA)
    773		reg |= MX31_CSPICTRL_PHA;
    774	if (spi->mode & SPI_CPOL)
    775		reg |= MX31_CSPICTRL_POL;
    776	if (spi->mode & SPI_CS_HIGH)
    777		reg |= MX31_CSPICTRL_SSPOL;
    778	if (!spi->cs_gpiod)
    779		reg |= (spi->chip_select) <<
    780			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
    781						  MX31_CSPICTRL_CS_SHIFT);
    782
    783	if (spi_imx->usedma)
    784		reg |= MX31_CSPICTRL_SMC;
    785
    786	writel(reg, spi_imx->base + MXC_CSPICTRL);
    787
    788	reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
    789	if (spi->mode & SPI_LOOP)
    790		reg |= MX31_TEST_LBC;
    791	else
    792		reg &= ~MX31_TEST_LBC;
    793	writel(reg, spi_imx->base + MX31_CSPI_TESTREG);
    794
    795	if (spi_imx->usedma) {
    796		/*
    797		 * configure DMA requests when RXFIFO is half full and
    798		 * when TXFIFO is half empty
    799		 */
    800		writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
    801			spi_imx->base + MX31_CSPI_DMAREG);
    802	}
    803
    804	return 0;
    805}
    806
    807static int mx31_rx_available(struct spi_imx_data *spi_imx)
    808{
    809	return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
    810}
    811
    812static void mx31_reset(struct spi_imx_data *spi_imx)
    813{
    814	/* drain receive buffer */
    815	while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
    816		readl(spi_imx->base + MXC_CSPIRXDATA);
    817}
    818
    819#define MX21_INTREG_RR		(1 << 4)
    820#define MX21_INTREG_TEEN	(1 << 9)
    821#define MX21_INTREG_RREN	(1 << 13)
    822
    823#define MX21_CSPICTRL_POL	(1 << 5)
    824#define MX21_CSPICTRL_PHA	(1 << 6)
    825#define MX21_CSPICTRL_SSPOL	(1 << 8)
    826#define MX21_CSPICTRL_XCH	(1 << 9)
    827#define MX21_CSPICTRL_ENABLE	(1 << 10)
    828#define MX21_CSPICTRL_MASTER	(1 << 11)
    829#define MX21_CSPICTRL_DR_SHIFT	14
    830#define MX21_CSPICTRL_CS_SHIFT	19
    831
    832static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
    833{
    834	unsigned int val = 0;
    835
    836	if (enable & MXC_INT_TE)
    837		val |= MX21_INTREG_TEEN;
    838	if (enable & MXC_INT_RR)
    839		val |= MX21_INTREG_RREN;
    840
    841	writel(val, spi_imx->base + MXC_CSPIINT);
    842}
    843
    844static void mx21_trigger(struct spi_imx_data *spi_imx)
    845{
    846	unsigned int reg;
    847
    848	reg = readl(spi_imx->base + MXC_CSPICTRL);
    849	reg |= MX21_CSPICTRL_XCH;
    850	writel(reg, spi_imx->base + MXC_CSPICTRL);
    851}
    852
    853static int mx21_prepare_message(struct spi_imx_data *spi_imx,
    854				struct spi_message *msg)
    855{
    856	return 0;
    857}
    858
    859static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
    860				 struct spi_device *spi)
    861{
    862	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
    863	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
    864	unsigned int clk;
    865
    866	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->spi_bus_clk, max, &clk)
    867		<< MX21_CSPICTRL_DR_SHIFT;
    868	spi_imx->spi_bus_clk = clk;
    869
    870	reg |= spi_imx->bits_per_word - 1;
    871
    872	if (spi->mode & SPI_CPHA)
    873		reg |= MX21_CSPICTRL_PHA;
    874	if (spi->mode & SPI_CPOL)
    875		reg |= MX21_CSPICTRL_POL;
    876	if (spi->mode & SPI_CS_HIGH)
    877		reg |= MX21_CSPICTRL_SSPOL;
    878	if (!spi->cs_gpiod)
    879		reg |= spi->chip_select << MX21_CSPICTRL_CS_SHIFT;
    880
    881	writel(reg, spi_imx->base + MXC_CSPICTRL);
    882
    883	return 0;
    884}
    885
    886static int mx21_rx_available(struct spi_imx_data *spi_imx)
    887{
    888	return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
    889}
    890
    891static void mx21_reset(struct spi_imx_data *spi_imx)
    892{
    893	writel(1, spi_imx->base + MXC_RESET);
    894}
    895
    896#define MX1_INTREG_RR		(1 << 3)
    897#define MX1_INTREG_TEEN		(1 << 8)
    898#define MX1_INTREG_RREN		(1 << 11)
    899
    900#define MX1_CSPICTRL_POL	(1 << 4)
    901#define MX1_CSPICTRL_PHA	(1 << 5)
    902#define MX1_CSPICTRL_XCH	(1 << 8)
    903#define MX1_CSPICTRL_ENABLE	(1 << 9)
    904#define MX1_CSPICTRL_MASTER	(1 << 10)
    905#define MX1_CSPICTRL_DR_SHIFT	13
    906
    907static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
    908{
    909	unsigned int val = 0;
    910
    911	if (enable & MXC_INT_TE)
    912		val |= MX1_INTREG_TEEN;
    913	if (enable & MXC_INT_RR)
    914		val |= MX1_INTREG_RREN;
    915
    916	writel(val, spi_imx->base + MXC_CSPIINT);
    917}
    918
    919static void mx1_trigger(struct spi_imx_data *spi_imx)
    920{
    921	unsigned int reg;
    922
    923	reg = readl(spi_imx->base + MXC_CSPICTRL);
    924	reg |= MX1_CSPICTRL_XCH;
    925	writel(reg, spi_imx->base + MXC_CSPICTRL);
    926}
    927
    928static int mx1_prepare_message(struct spi_imx_data *spi_imx,
    929			       struct spi_message *msg)
    930{
    931	return 0;
    932}
    933
    934static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
    935				struct spi_device *spi)
    936{
    937	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
    938	unsigned int clk;
    939
    940	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
    941		MX1_CSPICTRL_DR_SHIFT;
    942	spi_imx->spi_bus_clk = clk;
    943
    944	reg |= spi_imx->bits_per_word - 1;
    945
    946	if (spi->mode & SPI_CPHA)
    947		reg |= MX1_CSPICTRL_PHA;
    948	if (spi->mode & SPI_CPOL)
    949		reg |= MX1_CSPICTRL_POL;
    950
    951	writel(reg, spi_imx->base + MXC_CSPICTRL);
    952
    953	return 0;
    954}
    955
    956static int mx1_rx_available(struct spi_imx_data *spi_imx)
    957{
    958	return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
    959}
    960
    961static void mx1_reset(struct spi_imx_data *spi_imx)
    962{
    963	writel(1, spi_imx->base + MXC_RESET);
    964}
    965
    966static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
    967	.intctrl = mx1_intctrl,
    968	.prepare_message = mx1_prepare_message,
    969	.prepare_transfer = mx1_prepare_transfer,
    970	.trigger = mx1_trigger,
    971	.rx_available = mx1_rx_available,
    972	.reset = mx1_reset,
    973	.fifo_size = 8,
    974	.has_dmamode = false,
    975	.dynamic_burst = false,
    976	.has_slavemode = false,
    977	.devtype = IMX1_CSPI,
    978};
    979
    980static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
    981	.intctrl = mx21_intctrl,
    982	.prepare_message = mx21_prepare_message,
    983	.prepare_transfer = mx21_prepare_transfer,
    984	.trigger = mx21_trigger,
    985	.rx_available = mx21_rx_available,
    986	.reset = mx21_reset,
    987	.fifo_size = 8,
    988	.has_dmamode = false,
    989	.dynamic_burst = false,
    990	.has_slavemode = false,
    991	.devtype = IMX21_CSPI,
    992};
    993
    994static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
    995	/* i.mx27 cspi shares the functions with i.mx21 one */
    996	.intctrl = mx21_intctrl,
    997	.prepare_message = mx21_prepare_message,
    998	.prepare_transfer = mx21_prepare_transfer,
    999	.trigger = mx21_trigger,
   1000	.rx_available = mx21_rx_available,
   1001	.reset = mx21_reset,
   1002	.fifo_size = 8,
   1003	.has_dmamode = false,
   1004	.dynamic_burst = false,
   1005	.has_slavemode = false,
   1006	.devtype = IMX27_CSPI,
   1007};
   1008
   1009static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
   1010	.intctrl = mx31_intctrl,
   1011	.prepare_message = mx31_prepare_message,
   1012	.prepare_transfer = mx31_prepare_transfer,
   1013	.trigger = mx31_trigger,
   1014	.rx_available = mx31_rx_available,
   1015	.reset = mx31_reset,
   1016	.fifo_size = 8,
   1017	.has_dmamode = false,
   1018	.dynamic_burst = false,
   1019	.has_slavemode = false,
   1020	.devtype = IMX31_CSPI,
   1021};
   1022
   1023static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
   1024	/* i.mx35 and later cspi shares the functions with i.mx31 one */
   1025	.intctrl = mx31_intctrl,
   1026	.prepare_message = mx31_prepare_message,
   1027	.prepare_transfer = mx31_prepare_transfer,
   1028	.trigger = mx31_trigger,
   1029	.rx_available = mx31_rx_available,
   1030	.reset = mx31_reset,
   1031	.fifo_size = 8,
   1032	.has_dmamode = true,
   1033	.dynamic_burst = false,
   1034	.has_slavemode = false,
   1035	.devtype = IMX35_CSPI,
   1036};
   1037
   1038static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
   1039	.intctrl = mx51_ecspi_intctrl,
   1040	.prepare_message = mx51_ecspi_prepare_message,
   1041	.prepare_transfer = mx51_ecspi_prepare_transfer,
   1042	.trigger = mx51_ecspi_trigger,
   1043	.rx_available = mx51_ecspi_rx_available,
   1044	.reset = mx51_ecspi_reset,
   1045	.setup_wml = mx51_setup_wml,
   1046	.disable_dma = mx51_disable_dma,
   1047	.fifo_size = 64,
   1048	.has_dmamode = true,
   1049	.dynamic_burst = true,
   1050	.has_slavemode = true,
   1051	.disable = mx51_ecspi_disable,
   1052	.devtype = IMX51_ECSPI,
   1053};
   1054
   1055static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
   1056	.intctrl = mx51_ecspi_intctrl,
   1057	.prepare_message = mx51_ecspi_prepare_message,
   1058	.prepare_transfer = mx51_ecspi_prepare_transfer,
   1059	.trigger = mx51_ecspi_trigger,
   1060	.rx_available = mx51_ecspi_rx_available,
   1061	.disable_dma = mx51_disable_dma,
   1062	.reset = mx51_ecspi_reset,
   1063	.fifo_size = 64,
   1064	.has_dmamode = true,
   1065	.has_slavemode = true,
   1066	.disable = mx51_ecspi_disable,
   1067	.devtype = IMX53_ECSPI,
   1068};
   1069
   1070static struct spi_imx_devtype_data imx6ul_ecspi_devtype_data = {
   1071	.intctrl = mx51_ecspi_intctrl,
   1072	.prepare_message = mx51_ecspi_prepare_message,
   1073	.prepare_transfer = mx51_ecspi_prepare_transfer,
   1074	.trigger = mx51_ecspi_trigger,
   1075	.rx_available = mx51_ecspi_rx_available,
   1076	.reset = mx51_ecspi_reset,
   1077	.setup_wml = mx51_setup_wml,
   1078	.fifo_size = 64,
   1079	.has_dmamode = true,
   1080	.dynamic_burst = true,
   1081	.has_slavemode = true,
   1082	.tx_glitch_fixed = true,
   1083	.disable = mx51_ecspi_disable,
   1084	.devtype = IMX51_ECSPI,
   1085};
   1086
   1087static const struct of_device_id spi_imx_dt_ids[] = {
   1088	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
   1089	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
   1090	{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
   1091	{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
   1092	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
   1093	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
   1094	{ .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
   1095	{ .compatible = "fsl,imx6ul-ecspi", .data = &imx6ul_ecspi_devtype_data, },
   1096	{ /* sentinel */ }
   1097};
   1098MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
   1099
   1100static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
   1101{
   1102	u32 ctrl;
   1103
   1104	ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
   1105	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
   1106	ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
   1107	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
   1108}
   1109
   1110static void spi_imx_push(struct spi_imx_data *spi_imx)
   1111{
   1112	unsigned int burst_len;
   1113
   1114	/*
   1115	 * Reload the FIFO when the number of bytes remaining in the
   1116	 * current burst reaches 0. This only applies when bits_per_word is a
   1117	 * multiple of 8.
   1118	 */
   1119	if (!spi_imx->remainder) {
   1120		if (spi_imx->dynamic_burst) {
   1121
   1122			/* We need to deal with unaligned data first */
   1123			burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST;
   1124
   1125			if (!burst_len)
   1126				burst_len = MX51_ECSPI_CTRL_MAX_BURST;
   1127
   1128			spi_imx_set_burst_len(spi_imx, burst_len * 8);
   1129
   1130			spi_imx->remainder = burst_len;
   1131		} else {
   1132			spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word);
   1133		}
   1134	}
   1135
   1136	while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
   1137		if (!spi_imx->count)
   1138			break;
   1139		if (spi_imx->dynamic_burst &&
   1140		    spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4))
   1141			break;
   1142		spi_imx->tx(spi_imx);
   1143		spi_imx->txfifo++;
   1144	}
   1145
   1146	if (!spi_imx->slave_mode)
   1147		spi_imx->devtype_data->trigger(spi_imx);
   1148}
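
       /*
        * Worked example (illustrative): for a 1030 byte PIO transfer with
        * dynamic_burst enabled, the first reload uses
        * burst_len = 1030 % 512 = 6, so the burst length is set to 48 bits
        * and remainder = 6; every following reload then uses the full
        * MX51_ECSPI_CTRL_MAX_BURST of 512 bytes.
        */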
   1149
   1150static irqreturn_t spi_imx_isr(int irq, void *dev_id)
   1151{
   1152	struct spi_imx_data *spi_imx = dev_id;
   1153
   1154	while (spi_imx->txfifo &&
   1155	       spi_imx->devtype_data->rx_available(spi_imx)) {
   1156		spi_imx->rx(spi_imx);
   1157		spi_imx->txfifo--;
   1158	}
   1159
   1160	if (spi_imx->count) {
   1161		spi_imx_push(spi_imx);
   1162		return IRQ_HANDLED;
   1163	}
   1164
   1165	if (spi_imx->txfifo) {
   1166		/* No data left to push, but still waiting for rx data,
   1167		 * enable receive data available interrupt.
   1168		 */
   1169		spi_imx->devtype_data->intctrl(
   1170				spi_imx, MXC_INT_RR);
   1171		return IRQ_HANDLED;
   1172	}
   1173
   1174	spi_imx->devtype_data->intctrl(spi_imx, 0);
   1175	complete(&spi_imx->xfer_done);
   1176
   1177	return IRQ_HANDLED;
   1178}
   1179
   1180static int spi_imx_dma_configure(struct spi_controller *controller)
   1181{
   1182	int ret;
   1183	enum dma_slave_buswidth buswidth;
   1184	struct dma_slave_config rx = {}, tx = {};
   1185	struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
   1186
   1187	switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
   1188	case 4:
   1189		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
   1190		break;
   1191	case 2:
   1192		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
   1193		break;
   1194	case 1:
   1195		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
   1196		break;
   1197	default:
   1198		return -EINVAL;
   1199	}
   1200
   1201	tx.direction = DMA_MEM_TO_DEV;
   1202	tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
   1203	tx.dst_addr_width = buswidth;
   1204	tx.dst_maxburst = spi_imx->wml;
   1205	ret = dmaengine_slave_config(controller->dma_tx, &tx);
   1206	if (ret) {
   1207		dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
   1208		return ret;
   1209	}
   1210
   1211	rx.direction = DMA_DEV_TO_MEM;
   1212	rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
   1213	rx.src_addr_width = buswidth;
   1214	rx.src_maxburst = spi_imx->wml;
   1215	ret = dmaengine_slave_config(controller->dma_rx, &rx);
   1216	if (ret) {
   1217		dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
   1218		return ret;
   1219	}
   1220
   1221	return 0;
   1222}
   1223
   1224static int spi_imx_setupxfer(struct spi_device *spi,
   1225				 struct spi_transfer *t)
   1226{
   1227	struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
   1228
   1229	if (!t)
   1230		return 0;
   1231
   1232	if (!t->speed_hz) {
   1233		if (!spi->max_speed_hz) {
   1234			dev_err(&spi->dev, "no speed_hz provided!\n");
   1235			return -EINVAL;
   1236		}
   1237		dev_dbg(&spi->dev, "using spi->max_speed_hz!\n");
   1238		spi_imx->spi_bus_clk = spi->max_speed_hz;
   1239	} else
   1240		spi_imx->spi_bus_clk = t->speed_hz;
   1241
   1242	spi_imx->bits_per_word = t->bits_per_word;
   1243
   1244	/*
   1245	 * Initialize the functions for transfer. To transfer non-byte-aligned
   1246	 * words, we have to use multiple word-size bursts, so we can't use
   1247	 * dynamic_burst in that case.
   1248	 */
   1249	if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
   1250	    !(spi->mode & SPI_CS_WORD) &&
   1251	    (spi_imx->bits_per_word == 8 ||
   1252	    spi_imx->bits_per_word == 16 ||
   1253	    spi_imx->bits_per_word == 32)) {
   1254
   1255		spi_imx->rx = spi_imx_buf_rx_swap;
   1256		spi_imx->tx = spi_imx_buf_tx_swap;
   1257		spi_imx->dynamic_burst = 1;
   1258
   1259	} else {
   1260		if (spi_imx->bits_per_word <= 8) {
   1261			spi_imx->rx = spi_imx_buf_rx_u8;
   1262			spi_imx->tx = spi_imx_buf_tx_u8;
   1263		} else if (spi_imx->bits_per_word <= 16) {
   1264			spi_imx->rx = spi_imx_buf_rx_u16;
   1265			spi_imx->tx = spi_imx_buf_tx_u16;
   1266		} else {
   1267			spi_imx->rx = spi_imx_buf_rx_u32;
   1268			spi_imx->tx = spi_imx_buf_tx_u32;
   1269		}
   1270		spi_imx->dynamic_burst = 0;
   1271	}
   1272
   1273	if (spi_imx_can_dma(spi_imx->controller, spi, t))
   1274		spi_imx->usedma = true;
   1275	else
   1276		spi_imx->usedma = false;
   1277
   1278	spi_imx->rx_only = ((t->tx_buf == NULL)
   1279			|| (t->tx_buf == spi->controller->dummy_tx));
   1280
   1281	if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
   1282		spi_imx->rx = mx53_ecspi_rx_slave;
   1283		spi_imx->tx = mx53_ecspi_tx_slave;
   1284		spi_imx->slave_burst = t->len;
   1285	}
   1286
   1287	spi_imx->devtype_data->prepare_transfer(spi_imx, spi);
   1288
   1289	return 0;
   1290}
   1291
   1292static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
   1293{
   1294	struct spi_controller *controller = spi_imx->controller;
   1295
   1296	if (controller->dma_rx) {
   1297		dma_release_channel(controller->dma_rx);
   1298		controller->dma_rx = NULL;
   1299	}
   1300
   1301	if (controller->dma_tx) {
   1302		dma_release_channel(controller->dma_tx);
   1303		controller->dma_tx = NULL;
   1304	}
   1305}
   1306
   1307static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
   1308			     struct spi_controller *controller)
   1309{
   1310	int ret;
   1311
   1312	spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;
   1313
   1314	/* Prepare for TX DMA: */
   1315	controller->dma_tx = dma_request_chan(dev, "tx");
   1316	if (IS_ERR(controller->dma_tx)) {
   1317		ret = PTR_ERR(controller->dma_tx);
   1318		dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
   1319		controller->dma_tx = NULL;
   1320		goto err;
   1321	}
   1322
   1323	/* Prepare for RX: */
   1324	controller->dma_rx = dma_request_chan(dev, "rx");
   1325	if (IS_ERR(controller->dma_rx)) {
   1326		ret = PTR_ERR(controller->dma_rx);
   1327		dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
   1328		controller->dma_rx = NULL;
   1329		goto err;
   1330	}
   1331
   1332	init_completion(&spi_imx->dma_rx_completion);
   1333	init_completion(&spi_imx->dma_tx_completion);
   1334	controller->can_dma = spi_imx_can_dma;
   1335	controller->max_dma_len = MAX_SDMA_BD_BYTES;
   1336	spi_imx->controller->flags = SPI_CONTROLLER_MUST_RX |
   1337					 SPI_CONTROLLER_MUST_TX;
   1338
   1339	return 0;
   1340err:
   1341	spi_imx_sdma_exit(spi_imx);
   1342	return ret;
   1343}
   1344
   1345static void spi_imx_dma_rx_callback(void *cookie)
   1346{
   1347	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
   1348
   1349	complete(&spi_imx->dma_rx_completion);
   1350}
   1351
   1352static void spi_imx_dma_tx_callback(void *cookie)
   1353{
   1354	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
   1355
   1356	complete(&spi_imx->dma_tx_completion);
   1357}
   1358
   1359static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
   1360{
   1361	unsigned long timeout = 0;
   1362
   1363	/* Time for the actual data transfer plus the HW related CS change delay */
   1364	timeout = (8 + 4) * size / spi_imx->spi_bus_clk;
   1365
   1366	/* Add extra second for scheduler related activities */
   1367	timeout += 1;
   1368
   1369	/* Double calculated timeout */
   1370	return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
   1371}
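
       /*
        * Worked example (illustrative): a 1024 byte transfer at
        * spi_bus_clk = 1 MHz gives (8 + 4) * 1024 / 1000000 = 0 seconds of
        * raw transfer time (integer division), plus the extra second for
        * scheduling, doubled to a 2 second timeout.
        */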
   1372
   1373static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
   1374				struct spi_transfer *transfer)
   1375{
   1376	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
   1377	unsigned long transfer_timeout;
   1378	unsigned long timeout;
   1379	struct spi_controller *controller = spi_imx->controller;
   1380	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
   1381	struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
   1382	unsigned int bytes_per_word, i;
   1383	int ret;
   1384
   1385	/* Get the right burst length from the last sg to ensure no tail data */
   1386	bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
   1387	for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
   1388		if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
   1389			break;
   1390	}
   1391	/* Use 1 as wml if no suitable burst length was found */
   1392	if (i == 0)
   1393		i = 1;
   1394
   1395	spi_imx->wml = i;
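
       	/*
       	 * Example (illustrative): with a 64 word FIFO the search starts at
       	 * wml = 32; for a last scatterlist entry of 280 bytes at one byte
       	 * per word, 32..29 do not divide 280 evenly but 28 does, so
       	 * wml ends up as 28.
       	 */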
   1396
   1397	ret = spi_imx_dma_configure(controller);
   1398	if (ret)
   1399		goto dma_failure_no_start;
   1400
   1401	if (!spi_imx->devtype_data->setup_wml) {
   1402		dev_err(spi_imx->dev, "No setup_wml()?\n");
   1403		ret = -EINVAL;
   1404		goto dma_failure_no_start;
   1405	}
   1406	spi_imx->devtype_data->setup_wml(spi_imx);
   1407
   1408	/*
   1409	 * The TX DMA setup starts the transfer, so make sure RX is configured
   1410	 * before TX.
   1411	 */
   1412	desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
   1413				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
   1414				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
   1415	if (!desc_rx) {
   1416		ret = -EINVAL;
   1417		goto dma_failure_no_start;
   1418	}
   1419
   1420	desc_rx->callback = spi_imx_dma_rx_callback;
   1421	desc_rx->callback_param = (void *)spi_imx;
   1422	dmaengine_submit(desc_rx);
   1423	reinit_completion(&spi_imx->dma_rx_completion);
   1424	dma_async_issue_pending(controller->dma_rx);
   1425
   1426	desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
   1427				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
   1428				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
   1429	if (!desc_tx) {
   1430		dmaengine_terminate_all(controller->dma_tx);
   1431		dmaengine_terminate_all(controller->dma_rx);
   1432		return -EINVAL;
   1433	}
   1434
   1435	desc_tx->callback = spi_imx_dma_tx_callback;
   1436	desc_tx->callback_param = (void *)spi_imx;
   1437	dmaengine_submit(desc_tx);
   1438	reinit_completion(&spi_imx->dma_tx_completion);
   1439	dma_async_issue_pending(controller->dma_tx);
   1440
   1441	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
   1442
   1443	/* Wait for SDMA to finish the data transfer. */
   1444	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
   1445						transfer_timeout);
   1446	if (!timeout) {
   1447		dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
   1448		dmaengine_terminate_all(controller->dma_tx);
   1449		dmaengine_terminate_all(controller->dma_rx);
   1450		return -ETIMEDOUT;
   1451	}
   1452
   1453	timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
   1454					      transfer_timeout);
   1455	if (!timeout) {
   1456		dev_err(&controller->dev, "I/O Error in DMA RX\n");
   1457		spi_imx->devtype_data->reset(spi_imx);
   1458		dmaengine_terminate_all(controller->dma_rx);
   1459		return -ETIMEDOUT;
   1460	}
   1461
   1462	return 0;
   1463/* fallback to pio */
   1464dma_failure_no_start:
   1465	transfer->error |= SPI_TRANS_FAIL_NO_START;
   1466	return ret;
   1467}
   1468
   1469static int spi_imx_pio_transfer(struct spi_device *spi,
   1470				struct spi_transfer *transfer)
   1471{
   1472	struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
   1473	unsigned long transfer_timeout;
   1474	unsigned long timeout;
   1475
   1476	spi_imx->tx_buf = transfer->tx_buf;
   1477	spi_imx->rx_buf = transfer->rx_buf;
   1478	spi_imx->count = transfer->len;
   1479	spi_imx->txfifo = 0;
   1480	spi_imx->remainder = 0;
   1481
   1482	reinit_completion(&spi_imx->xfer_done);
   1483
   1484	spi_imx_push(spi_imx);
   1485
   1486	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);
   1487
   1488	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
   1489
   1490	timeout = wait_for_completion_timeout(&spi_imx->xfer_done,
   1491					      transfer_timeout);
   1492	if (!timeout) {
   1493		dev_err(&spi->dev, "I/O Error in PIO\n");
   1494		spi_imx->devtype_data->reset(spi_imx);
   1495		return -ETIMEDOUT;
   1496	}
   1497
   1498	return 0;
   1499}
   1500
   1501static int spi_imx_poll_transfer(struct spi_device *spi,
   1502				 struct spi_transfer *transfer)
   1503{
   1504	struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
   1505	unsigned long timeout;
   1506
   1507	spi_imx->tx_buf = transfer->tx_buf;
   1508	spi_imx->rx_buf = transfer->rx_buf;
   1509	spi_imx->count = transfer->len;
   1510	spi_imx->txfifo = 0;
   1511	spi_imx->remainder = 0;
   1512
   1513	/* Fill the FIFO before the timeout calculation: if we are
   1514	 * interrupted here, the data is already being transferred by
   1515	 * the HW while we are interrupted.
   1516	 */
   1517	spi_imx_push(spi_imx);
   1518
   1519	timeout = spi_imx_calculate_timeout(spi_imx, transfer->len) + jiffies;
   1520	while (spi_imx->txfifo) {
   1521		/* RX */
   1522		while (spi_imx->txfifo &&
   1523		       spi_imx->devtype_data->rx_available(spi_imx)) {
   1524			spi_imx->rx(spi_imx);
   1525			spi_imx->txfifo--;
   1526		}
   1527
   1528		/* TX */
   1529		if (spi_imx->count) {
   1530			spi_imx_push(spi_imx);
   1531			continue;
   1532		}
   1533
   1534		if (spi_imx->txfifo &&
   1535		    time_after(jiffies, timeout)) {
   1536
   1537			dev_err_ratelimited(&spi->dev,
   1538					    "timeout period reached: jiffies: %lu- falling back to interrupt mode\n",
   1539					    jiffies - timeout);
   1540
   1541			/* fall back to interrupt mode */
   1542			return spi_imx_pio_transfer(spi, transfer);
   1543		}
   1544	}
   1545
   1546	return 0;
   1547}
   1548
   1549static int spi_imx_pio_transfer_slave(struct spi_device *spi,
   1550				      struct spi_transfer *transfer)
   1551{
   1552	struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
   1553	int ret = 0;
   1554
   1555	if (is_imx53_ecspi(spi_imx) &&
   1556	    transfer->len > MX53_MAX_TRANSFER_BYTES) {
   1557		dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
   1558			MX53_MAX_TRANSFER_BYTES);
   1559		return -EMSGSIZE;
   1560	}
   1561
   1562	spi_imx->tx_buf = transfer->tx_buf;
   1563	spi_imx->rx_buf = transfer->rx_buf;
   1564	spi_imx->count = transfer->len;
   1565	spi_imx->txfifo = 0;
   1566	spi_imx->remainder = 0;
   1567
   1568	reinit_completion(&spi_imx->xfer_done);
   1569	spi_imx->slave_aborted = false;
   1570
   1571	spi_imx_push(spi_imx);
   1572
   1573	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);
   1574
   1575	if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
   1576	    spi_imx->slave_aborted) {
   1577		dev_dbg(&spi->dev, "interrupted\n");
   1578		ret = -EINTR;
   1579	}
   1580
   1581	/* The eCSPI has a HW issue when working in slave mode:
   1582	 * after 64 words are written to the TXFIFO, even once the TXFIFO
   1583	 * becomes empty, ECSPI_TXDATA keeps shifting out the last word,
   1584	 * so we have to disable the eCSPI when in slave mode after the
   1585	 * transfer completes.
   1586	 */
   1587	if (spi_imx->devtype_data->disable)
   1588		spi_imx->devtype_data->disable(spi_imx);
   1589
   1590	return ret;
   1591}
   1592
   1593static int spi_imx_transfer_one(struct spi_controller *controller,
   1594				struct spi_device *spi,
   1595				struct spi_transfer *transfer)
   1596{
   1597	struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
   1598	unsigned long hz_per_byte, byte_limit;
   1599
   1600	spi_imx_setupxfer(spi, transfer);
   1601	transfer->effective_speed_hz = spi_imx->spi_bus_clk;
   1602
   1603	/* flush rxfifo before transfer */
   1604	while (spi_imx->devtype_data->rx_available(spi_imx))
   1605		readl(spi_imx->base + MXC_CSPIRXDATA);
   1606
   1607	if (spi_imx->slave_mode)
   1608		return spi_imx_pio_transfer_slave(spi, transfer);
   1609
   1610	/*
   1611	 * Calculate the estimated time in us the transfer runs. Find
   1612	 * the number of Hz per byte per polling limit.
   1613	 */
   1614	hz_per_byte = polling_limit_us ? ((8 + 4) * USEC_PER_SEC) / polling_limit_us : 0;
   1615	byte_limit = hz_per_byte ? transfer->effective_speed_hz / hz_per_byte : 1;
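
       	/*
       	 * Example (illustrative): with the default polling_limit_us = 30,
       	 * hz_per_byte = 12 * 1000000 / 30 = 400000; at an effective speed of
       	 * 10 MHz this gives byte_limit = 25, so transfers shorter than
       	 * 25 bytes are run in polling mode.
       	 */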
   1616
   1617	/* run in polling mode for short transfers */
   1618	if (transfer->len < byte_limit)
   1619		return spi_imx_poll_transfer(spi, transfer);
   1620
   1621	if (spi_imx->usedma)
   1622		return spi_imx_dma_transfer(spi_imx, transfer);
   1623
   1624	return spi_imx_pio_transfer(spi, transfer);
   1625}
   1626
   1627static int spi_imx_setup(struct spi_device *spi)
   1628{
   1629	dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
   1630		 spi->mode, spi->bits_per_word, spi->max_speed_hz);
   1631
   1632	return 0;
   1633}
   1634
   1635static void spi_imx_cleanup(struct spi_device *spi)
   1636{
   1637}
   1638
   1639static int
   1640spi_imx_prepare_message(struct spi_controller *controller, struct spi_message *msg)
   1641{
   1642	struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
   1643	int ret;
   1644
   1645	ret = pm_runtime_resume_and_get(spi_imx->dev);
   1646	if (ret < 0) {
   1647		dev_err(spi_imx->dev, "failed to enable clock\n");
   1648		return ret;
   1649	}
   1650
   1651	ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
   1652	if (ret) {
   1653		pm_runtime_mark_last_busy(spi_imx->dev);
   1654		pm_runtime_put_autosuspend(spi_imx->dev);
   1655	}
   1656
   1657	return ret;
   1658}
   1659
   1660static int
   1661spi_imx_unprepare_message(struct spi_controller *controller, struct spi_message *msg)
   1662{
   1663	struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
   1664
   1665	pm_runtime_mark_last_busy(spi_imx->dev);
   1666	pm_runtime_put_autosuspend(spi_imx->dev);
   1667	return 0;
   1668}
   1669
   1670static int spi_imx_slave_abort(struct spi_controller *controller)
   1671{
   1672	struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
   1673
   1674	spi_imx->slave_aborted = true;
   1675	complete(&spi_imx->xfer_done);
   1676
   1677	return 0;
   1678}
   1679
   1680static int spi_imx_probe(struct platform_device *pdev)
   1681{
   1682	struct device_node *np = pdev->dev.of_node;
   1683	struct spi_controller *controller;
   1684	struct spi_imx_data *spi_imx;
   1685	struct resource *res;
   1686	int ret, irq, spi_drctl;
   1687	const struct spi_imx_devtype_data *devtype_data =
   1688			of_device_get_match_data(&pdev->dev);
   1689	bool slave_mode;
   1690	u32 val;
   1691
   1692	slave_mode = devtype_data->has_slavemode &&
   1693			of_property_read_bool(np, "spi-slave");
   1694	if (slave_mode)
   1695		controller = spi_alloc_slave(&pdev->dev,
   1696					     sizeof(struct spi_imx_data));
   1697	else
   1698		controller = spi_alloc_master(&pdev->dev,
   1699					      sizeof(struct spi_imx_data));
   1700	if (!controller)
   1701		return -ENOMEM;
   1702
   1703	ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
   1704	if ((ret < 0) || (spi_drctl >= 0x3)) {
   1705		/* '11' is reserved */
   1706		spi_drctl = 0;
   1707	}
   1708
   1709	platform_set_drvdata(pdev, controller);
   1710
   1711	controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
   1712	controller->bus_num = np ? -1 : pdev->id;
   1713	controller->use_gpio_descriptors = true;
   1714
   1715	spi_imx = spi_controller_get_devdata(controller);
   1716	spi_imx->controller = controller;
   1717	spi_imx->dev = &pdev->dev;
   1718	spi_imx->slave_mode = slave_mode;
   1719
   1720	spi_imx->devtype_data = devtype_data;
   1721
   1722	/*
    1723	 * Get the number of chip selects from device properties. This can
    1724	 * come from the device tree or board files; if it is not defined,
    1725	 * a default of 3 chip selects is used, as all the legacy board
    1726	 * files have <= 3 chip selects.
   1727	 */
   1728	if (!device_property_read_u32(&pdev->dev, "num-cs", &val))
   1729		controller->num_chipselect = val;
   1730	else
   1731		controller->num_chipselect = 3;
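        	/*
        	 * For illustration, a (hypothetical) controller node could carry
        	 * "num-cs = <2>;" to declare two chip selects; nodes without the
        	 * property fall back to the default of 3 chosen above.
        	 */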
   1732
   1733	spi_imx->controller->transfer_one = spi_imx_transfer_one;
   1734	spi_imx->controller->setup = spi_imx_setup;
   1735	spi_imx->controller->cleanup = spi_imx_cleanup;
   1736	spi_imx->controller->prepare_message = spi_imx_prepare_message;
   1737	spi_imx->controller->unprepare_message = spi_imx_unprepare_message;
   1738	spi_imx->controller->slave_abort = spi_imx_slave_abort;
   1739	spi_imx->controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS;
   1740
   1741	if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
   1742	    is_imx53_ecspi(spi_imx))
   1743		spi_imx->controller->mode_bits |= SPI_LOOP | SPI_READY;
   1744
   1745	if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx))
   1746		spi_imx->controller->mode_bits |= SPI_RX_CPHA_FLIP;
   1747
   1748	if (is_imx51_ecspi(spi_imx) &&
   1749	    device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
   1750		/*
    1751		 * When using HW-CS, implementing SPI_CS_WORD can be done by just
   1752		 * setting the burst length to the word size. This is
   1753		 * considerably faster than manually controlling the CS.
   1754		 */
   1755		spi_imx->controller->mode_bits |= SPI_CS_WORD;
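        	/*
        	 * SPI_CS_WORD requests that the chip select be toggled after
        	 * every word of a transfer; with a hardware chip select the
        	 * controller achieves this by capping the burst length at one
        	 * word, avoiding a GPIO toggle per word.
        	 */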
   1756
   1757	spi_imx->spi_drctl = spi_drctl;
   1758
   1759	init_completion(&spi_imx->xfer_done);
   1760
   1761	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   1762	spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
   1763	if (IS_ERR(spi_imx->base)) {
   1764		ret = PTR_ERR(spi_imx->base);
   1765		goto out_controller_put;
   1766	}
   1767	spi_imx->base_phys = res->start;
   1768
   1769	irq = platform_get_irq(pdev, 0);
   1770	if (irq < 0) {
   1771		ret = irq;
   1772		goto out_controller_put;
   1773	}
   1774
   1775	ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
   1776			       dev_name(&pdev->dev), spi_imx);
   1777	if (ret) {
   1778		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
   1779		goto out_controller_put;
   1780	}
   1781
   1782	spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
   1783	if (IS_ERR(spi_imx->clk_ipg)) {
   1784		ret = PTR_ERR(spi_imx->clk_ipg);
   1785		goto out_controller_put;
   1786	}
   1787
   1788	spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
   1789	if (IS_ERR(spi_imx->clk_per)) {
   1790		ret = PTR_ERR(spi_imx->clk_per);
   1791		goto out_controller_put;
   1792	}
   1793
   1794	ret = clk_prepare_enable(spi_imx->clk_per);
   1795	if (ret)
   1796		goto out_controller_put;
   1797
   1798	ret = clk_prepare_enable(spi_imx->clk_ipg);
   1799	if (ret)
   1800		goto out_put_per;
   1801
   1802	pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
   1803	pm_runtime_use_autosuspend(spi_imx->dev);
   1804	pm_runtime_get_noresume(spi_imx->dev);
   1805	pm_runtime_set_active(spi_imx->dev);
   1806	pm_runtime_enable(spi_imx->dev);
   1807
   1808	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
   1809	/*
    1810	 * DMA is only validated on i.mx35 and i.mx6 for now; the constraint
    1811	 * can be removed once it is validated on other chips.
   1812	 */
   1813	if (spi_imx->devtype_data->has_dmamode) {
   1814		ret = spi_imx_sdma_init(&pdev->dev, spi_imx, controller);
   1815		if (ret == -EPROBE_DEFER)
   1816			goto out_runtime_pm_put;
   1817
   1818		if (ret < 0)
   1819			dev_dbg(&pdev->dev, "dma setup error %d, use pio\n",
   1820				ret);
   1821	}
   1822
   1823	spi_imx->devtype_data->reset(spi_imx);
   1824
   1825	spi_imx->devtype_data->intctrl(spi_imx, 0);
   1826
   1827	controller->dev.of_node = pdev->dev.of_node;
   1828	ret = spi_register_controller(controller);
   1829	if (ret) {
   1830		dev_err_probe(&pdev->dev, ret, "register controller failed\n");
   1831		goto out_register_controller;
   1832	}
   1833
   1834	pm_runtime_mark_last_busy(spi_imx->dev);
   1835	pm_runtime_put_autosuspend(spi_imx->dev);
   1836
   1837	return ret;
   1838
   1839out_register_controller:
   1840	if (spi_imx->devtype_data->has_dmamode)
   1841		spi_imx_sdma_exit(spi_imx);
   1842out_runtime_pm_put:
   1843	pm_runtime_dont_use_autosuspend(spi_imx->dev);
   1844	pm_runtime_set_suspended(&pdev->dev);
   1845	pm_runtime_disable(spi_imx->dev);
   1846
   1847	clk_disable_unprepare(spi_imx->clk_ipg);
   1848out_put_per:
   1849	clk_disable_unprepare(spi_imx->clk_per);
   1850out_controller_put:
   1851	spi_controller_put(controller);
   1852
   1853	return ret;
   1854}
   1855
   1856static int spi_imx_remove(struct platform_device *pdev)
   1857{
   1858	struct spi_controller *controller = platform_get_drvdata(pdev);
   1859	struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
   1860	int ret;
   1861
   1862	spi_unregister_controller(controller);
   1863
   1864	ret = pm_runtime_resume_and_get(spi_imx->dev);
   1865	if (ret < 0) {
   1866		dev_err(spi_imx->dev, "failed to enable clock\n");
   1867		return ret;
   1868	}
   1869
   1870	writel(0, spi_imx->base + MXC_CSPICTRL);
   1871
   1872	pm_runtime_dont_use_autosuspend(spi_imx->dev);
   1873	pm_runtime_put_sync(spi_imx->dev);
   1874	pm_runtime_disable(spi_imx->dev);
   1875
   1876	spi_imx_sdma_exit(spi_imx);
   1877
   1878	return 0;
   1879}
   1880
   1881static int __maybe_unused spi_imx_runtime_resume(struct device *dev)
   1882{
   1883	struct spi_controller *controller = dev_get_drvdata(dev);
   1884	struct spi_imx_data *spi_imx;
   1885	int ret;
   1886
   1887	spi_imx = spi_controller_get_devdata(controller);
   1888
   1889	ret = clk_prepare_enable(spi_imx->clk_per);
   1890	if (ret)
   1891		return ret;
   1892
   1893	ret = clk_prepare_enable(spi_imx->clk_ipg);
   1894	if (ret) {
   1895		clk_disable_unprepare(spi_imx->clk_per);
   1896		return ret;
   1897	}
   1898
   1899	return 0;
   1900}
   1901
   1902static int __maybe_unused spi_imx_runtime_suspend(struct device *dev)
   1903{
   1904	struct spi_controller *controller = dev_get_drvdata(dev);
   1905	struct spi_imx_data *spi_imx;
   1906
   1907	spi_imx = spi_controller_get_devdata(controller);
   1908
   1909	clk_disable_unprepare(spi_imx->clk_per);
   1910	clk_disable_unprepare(spi_imx->clk_ipg);
   1911
   1912	return 0;
   1913}
   1914
   1915static int __maybe_unused spi_imx_suspend(struct device *dev)
   1916{
   1917	pinctrl_pm_select_sleep_state(dev);
   1918	return 0;
   1919}
   1920
   1921static int __maybe_unused spi_imx_resume(struct device *dev)
   1922{
   1923	pinctrl_pm_select_default_state(dev);
   1924	return 0;
   1925}
   1926
   1927static const struct dev_pm_ops imx_spi_pm = {
   1928	SET_RUNTIME_PM_OPS(spi_imx_runtime_suspend,
   1929				spi_imx_runtime_resume, NULL)
   1930	SET_SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume)
   1931};
   1932
   1933static struct platform_driver spi_imx_driver = {
   1934	.driver = {
   1935		   .name = DRIVER_NAME,
   1936		   .of_match_table = spi_imx_dt_ids,
   1937		   .pm = &imx_spi_pm,
   1938	},
   1939	.probe = spi_imx_probe,
   1940	.remove = spi_imx_remove,
   1941};
   1942module_platform_driver(spi_imx_driver);
   1943
   1944MODULE_DESCRIPTION("i.MX SPI Controller driver");
   1945MODULE_AUTHOR("Sascha Hauer, Pengutronix");
   1946MODULE_LICENSE("GPL");
   1947MODULE_ALIAS("platform:" DRIVER_NAME);