cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

spi-s3c64xx.c (39877B)


      1// SPDX-License-Identifier: GPL-2.0+
      2//
      3// Copyright (c) 2009 Samsung Electronics Co., Ltd.
      4//      Jaswinder Singh <jassi.brar@samsung.com>
      5
      6#include <linux/init.h>
      7#include <linux/module.h>
      8#include <linux/interrupt.h>
      9#include <linux/delay.h>
     10#include <linux/clk.h>
     11#include <linux/dma-mapping.h>
     12#include <linux/dmaengine.h>
     13#include <linux/platform_device.h>
     14#include <linux/pm_runtime.h>
     15#include <linux/spi/spi.h>
     16#include <linux/of.h>
     17#include <linux/of_device.h>
     18
     19#include <linux/platform_data/spi-s3c64xx.h>
     20
     21#define MAX_SPI_PORTS		6
     22#define S3C64XX_SPI_QUIRK_POLL		(1 << 0)
     23#define S3C64XX_SPI_QUIRK_CS_AUTO	(1 << 1)
     24#define AUTOSUSPEND_TIMEOUT	2000
     25
     26/* Registers and bit-fields */
     27
     28#define S3C64XX_SPI_CH_CFG		0x00
     29#define S3C64XX_SPI_CLK_CFG		0x04
     30#define S3C64XX_SPI_MODE_CFG		0x08
     31#define S3C64XX_SPI_CS_REG		0x0C
     32#define S3C64XX_SPI_INT_EN		0x10
     33#define S3C64XX_SPI_STATUS		0x14
     34#define S3C64XX_SPI_TX_DATA		0x18
     35#define S3C64XX_SPI_RX_DATA		0x1C
     36#define S3C64XX_SPI_PACKET_CNT		0x20
     37#define S3C64XX_SPI_PENDING_CLR		0x24
     38#define S3C64XX_SPI_SWAP_CFG		0x28
     39#define S3C64XX_SPI_FB_CLK		0x2C
     40
     41#define S3C64XX_SPI_CH_HS_EN		(1<<6)	/* High Speed Enable */
     42#define S3C64XX_SPI_CH_SW_RST		(1<<5)
     43#define S3C64XX_SPI_CH_SLAVE		(1<<4)
     44#define S3C64XX_SPI_CPOL_L		(1<<3)
     45#define S3C64XX_SPI_CPHA_B		(1<<2)
     46#define S3C64XX_SPI_CH_RXCH_ON		(1<<1)
     47#define S3C64XX_SPI_CH_TXCH_ON		(1<<0)
     48
     49#define S3C64XX_SPI_CLKSEL_SRCMSK	(3<<9)
     50#define S3C64XX_SPI_CLKSEL_SRCSHFT	9
     51#define S3C64XX_SPI_ENCLK_ENABLE	(1<<8)
     52#define S3C64XX_SPI_PSR_MASK		0xff
     53
     54#define S3C64XX_SPI_MODE_CH_TSZ_BYTE		(0<<29)
     55#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD	(1<<29)
     56#define S3C64XX_SPI_MODE_CH_TSZ_WORD		(2<<29)
     57#define S3C64XX_SPI_MODE_CH_TSZ_MASK		(3<<29)
     58#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE		(0<<17)
     59#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD	(1<<17)
     60#define S3C64XX_SPI_MODE_BUS_TSZ_WORD		(2<<17)
     61#define S3C64XX_SPI_MODE_BUS_TSZ_MASK		(3<<17)
     62#define S3C64XX_SPI_MODE_RXDMA_ON		(1<<2)
     63#define S3C64XX_SPI_MODE_TXDMA_ON		(1<<1)
     64#define S3C64XX_SPI_MODE_4BURST			(1<<0)
     65
     66#define S3C64XX_SPI_CS_NSC_CNT_2		(2<<4)
     67#define S3C64XX_SPI_CS_AUTO			(1<<1)
     68#define S3C64XX_SPI_CS_SIG_INACT		(1<<0)
     69
     70#define S3C64XX_SPI_INT_TRAILING_EN		(1<<6)
     71#define S3C64XX_SPI_INT_RX_OVERRUN_EN		(1<<5)
     72#define S3C64XX_SPI_INT_RX_UNDERRUN_EN		(1<<4)
     73#define S3C64XX_SPI_INT_TX_OVERRUN_EN		(1<<3)
     74#define S3C64XX_SPI_INT_TX_UNDERRUN_EN		(1<<2)
     75#define S3C64XX_SPI_INT_RX_FIFORDY_EN		(1<<1)
     76#define S3C64XX_SPI_INT_TX_FIFORDY_EN		(1<<0)
     77
     78#define S3C64XX_SPI_ST_RX_OVERRUN_ERR		(1<<5)
     79#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR		(1<<4)
     80#define S3C64XX_SPI_ST_TX_OVERRUN_ERR		(1<<3)
     81#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR		(1<<2)
     82#define S3C64XX_SPI_ST_RX_FIFORDY		(1<<1)
     83#define S3C64XX_SPI_ST_TX_FIFORDY		(1<<0)
     84
     85#define S3C64XX_SPI_PACKET_CNT_EN		(1<<16)
     86
     87#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR		(1<<4)
     88#define S3C64XX_SPI_PND_TX_OVERRUN_CLR		(1<<3)
     89#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR		(1<<2)
     90#define S3C64XX_SPI_PND_RX_OVERRUN_CLR		(1<<1)
     91#define S3C64XX_SPI_PND_TRAILING_CLR		(1<<0)
     92
     93#define S3C64XX_SPI_SWAP_RX_HALF_WORD		(1<<7)
     94#define S3C64XX_SPI_SWAP_RX_BYTE		(1<<6)
     95#define S3C64XX_SPI_SWAP_RX_BIT			(1<<5)
     96#define S3C64XX_SPI_SWAP_RX_EN			(1<<4)
     97#define S3C64XX_SPI_SWAP_TX_HALF_WORD		(1<<3)
     98#define S3C64XX_SPI_SWAP_TX_BYTE		(1<<2)
     99#define S3C64XX_SPI_SWAP_TX_BIT			(1<<1)
    100#define S3C64XX_SPI_SWAP_TX_EN			(1<<0)
    101
    102#define S3C64XX_SPI_FBCLK_MSK			(3<<0)
    103
    104#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
    105#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
    106				(1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
    107#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
    108#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
    109					FIFO_LVL_MASK(i))
    110
    111#define S3C64XX_SPI_MAX_TRAILCNT	0x3ff
    112#define S3C64XX_SPI_TRAILCNT_OFF	19
    113
    114#define S3C64XX_SPI_TRAILCNT		S3C64XX_SPI_MAX_TRAILCNT
    115
    116#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
    117#define is_polling(x)	(x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
    118
    119#define RXBUSY    (1<<2)
    120#define TXBUSY    (1<<3)
    121
/**
 * struct s3c64xx_spi_dma_data - per-direction dmaengine channel state
 * @ch: dmaengine channel used for this direction.
 * @cookie: cookie of the most recently submitted descriptor, used to
 *	query residue when a transfer is aborted.
 * @direction: DMA_DEV_TO_MEM for the rx channel, DMA_MEM_TO_DEV for tx
 *	(prepare_dma() keys its register setup off this field).
 */
struct s3c64xx_spi_dma_data {
	struct dma_chan *ch;
	dma_cookie_t cookie;
	enum dma_transfer_direction direction;
};
    127
    128/**
    129 * struct s3c64xx_spi_port_config - SPI Controller hardware info
    130 * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
    131 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS regiter.
    132 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS regiter.
    133 * @quirks: Bitmask of known quirks
    134 * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
    135 * @clk_from_cmu: True, if the controller does not include a clock mux and
    136 *	prescaler unit.
    137 * @clk_ioclk: True if clock is present on this device
    138 *
    139 * The Samsung s3c64xx SPI controller are used on various Samsung SoC's but
    140 * differ in some aspects such as the size of the fifo and spi bus clock
    141 * setup. Such differences are specified to the driver using this structure
    142 * which is provided as driver data to the driver.
    143 */
struct s3c64xx_spi_port_config {
	int	fifo_lvl_mask[MAX_SPI_PORTS];	/* indexed by sdd->port_id */
	int	rx_lvl_offset;
	int	tx_st_done;
	int	quirks;				/* S3C64XX_SPI_QUIRK_* bits */
	bool	high_speed;
	bool	clk_from_cmu;
	bool	clk_ioclk;
};
    153
    154/**
    155 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
    156 * @clk: Pointer to the spi clock.
    157 * @src_clk: Pointer to the clock used to generate SPI signals.
    158 * @ioclk: Pointer to the i/o clock between master and slave
    159 * @pdev: Pointer to device's platform device data
    160 * @master: Pointer to the SPI Protocol master.
    161 * @cntrlr_info: Platform specific data for the controller this driver manages.
    162 * @lock: Controller specific lock.
    163 * @state: Set of FLAGS to indicate status.
    164 * @sfr_start: BUS address of SPI controller regs.
    165 * @regs: Pointer to ioremap'ed controller registers.
    166 * @xfer_completion: To indicate completion of xfer task.
    167 * @cur_mode: Stores the active configuration of the controller.
    168 * @cur_bpw: Stores the active bits per word settings.
    169 * @cur_speed: Current clock speed
    170 * @rx_dma: Local receive DMA data (e.g. chan and direction)
    171 * @tx_dma: Local transmit DMA data (e.g. chan and direction)
    172 * @port_conf: Local SPI port configuartion data
    173 * @port_id: Port identification number
    174 */
struct s3c64xx_spi_driver_data {
	void __iomem                    *regs;
	struct clk                      *clk;
	struct clk                      *src_clk;
	struct clk                      *ioclk;
	struct platform_device          *pdev;
	struct spi_master               *master;
	struct s3c64xx_spi_info         *cntrlr_info;
	spinlock_t                      lock;	/* guards state & datapath kick-off */
	unsigned long                   sfr_start;
	struct completion               xfer_completion;
	unsigned                        state;	/* RXBUSY | TXBUSY flags */
	unsigned                        cur_mode, cur_bpw;
	unsigned                        cur_speed;
	struct s3c64xx_spi_dma_data	rx_dma;
	struct s3c64xx_spi_dma_data	tx_dma;
	const struct s3c64xx_spi_port_config	*port_conf;
	unsigned int			port_id;	/* index into fifo_lvl_mask */
};
    194
    195static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd)
    196{
    197	void __iomem *regs = sdd->regs;
    198	unsigned long loops;
    199	u32 val;
    200
    201	writel(0, regs + S3C64XX_SPI_PACKET_CNT);
    202
    203	val = readl(regs + S3C64XX_SPI_CH_CFG);
    204	val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
    205	writel(val, regs + S3C64XX_SPI_CH_CFG);
    206
    207	val = readl(regs + S3C64XX_SPI_CH_CFG);
    208	val |= S3C64XX_SPI_CH_SW_RST;
    209	val &= ~S3C64XX_SPI_CH_HS_EN;
    210	writel(val, regs + S3C64XX_SPI_CH_CFG);
    211
    212	/* Flush TxFIFO*/
    213	loops = msecs_to_loops(1);
    214	do {
    215		val = readl(regs + S3C64XX_SPI_STATUS);
    216	} while (TX_FIFO_LVL(val, sdd) && loops--);
    217
    218	if (loops == 0)
    219		dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
    220
    221	/* Flush RxFIFO*/
    222	loops = msecs_to_loops(1);
    223	do {
    224		val = readl(regs + S3C64XX_SPI_STATUS);
    225		if (RX_FIFO_LVL(val, sdd))
    226			readl(regs + S3C64XX_SPI_RX_DATA);
    227		else
    228			break;
    229	} while (loops--);
    230
    231	if (loops == 0)
    232		dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
    233
    234	val = readl(regs + S3C64XX_SPI_CH_CFG);
    235	val &= ~S3C64XX_SPI_CH_SW_RST;
    236	writel(val, regs + S3C64XX_SPI_CH_CFG);
    237
    238	val = readl(regs + S3C64XX_SPI_MODE_CFG);
    239	val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
    240	writel(val, regs + S3C64XX_SPI_MODE_CFG);
    241}
    242
    243static void s3c64xx_spi_dmacb(void *data)
    244{
    245	struct s3c64xx_spi_driver_data *sdd;
    246	struct s3c64xx_spi_dma_data *dma = data;
    247	unsigned long flags;
    248
    249	if (dma->direction == DMA_DEV_TO_MEM)
    250		sdd = container_of(data,
    251			struct s3c64xx_spi_driver_data, rx_dma);
    252	else
    253		sdd = container_of(data,
    254			struct s3c64xx_spi_driver_data, tx_dma);
    255
    256	spin_lock_irqsave(&sdd->lock, flags);
    257
    258	if (dma->direction == DMA_DEV_TO_MEM) {
    259		sdd->state &= ~RXBUSY;
    260		if (!(sdd->state & TXBUSY))
    261			complete(&sdd->xfer_completion);
    262	} else {
    263		sdd->state &= ~TXBUSY;
    264		if (!(sdd->state & RXBUSY))
    265			complete(&sdd->xfer_completion);
    266	}
    267
    268	spin_unlock_irqrestore(&sdd->lock, flags);
    269}
    270
    271static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
    272			struct sg_table *sgt)
    273{
    274	struct s3c64xx_spi_driver_data *sdd;
    275	struct dma_slave_config config;
    276	struct dma_async_tx_descriptor *desc;
    277	int ret;
    278
    279	memset(&config, 0, sizeof(config));
    280
    281	if (dma->direction == DMA_DEV_TO_MEM) {
    282		sdd = container_of((void *)dma,
    283			struct s3c64xx_spi_driver_data, rx_dma);
    284		config.direction = dma->direction;
    285		config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
    286		config.src_addr_width = sdd->cur_bpw / 8;
    287		config.src_maxburst = 1;
    288		dmaengine_slave_config(dma->ch, &config);
    289	} else {
    290		sdd = container_of((void *)dma,
    291			struct s3c64xx_spi_driver_data, tx_dma);
    292		config.direction = dma->direction;
    293		config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
    294		config.dst_addr_width = sdd->cur_bpw / 8;
    295		config.dst_maxburst = 1;
    296		dmaengine_slave_config(dma->ch, &config);
    297	}
    298
    299	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
    300				       dma->direction, DMA_PREP_INTERRUPT);
    301	if (!desc) {
    302		dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
    303			dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
    304		return -ENOMEM;
    305	}
    306
    307	desc->callback = s3c64xx_spi_dmacb;
    308	desc->callback_param = dma;
    309
    310	dma->cookie = dmaengine_submit(desc);
    311	ret = dma_submit_error(dma->cookie);
    312	if (ret) {
    313		dev_err(&sdd->pdev->dev, "DMA submission failed");
    314		return -EIO;
    315	}
    316
    317	dma_async_issue_pending(dma->ch);
    318	return 0;
    319}
    320
    321static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
    322{
    323	struct s3c64xx_spi_driver_data *sdd =
    324					spi_master_get_devdata(spi->master);
    325
    326	if (sdd->cntrlr_info->no_cs)
    327		return;
    328
    329	if (enable) {
    330		if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO)) {
    331			writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
    332		} else {
    333			u32 ssel = readl(sdd->regs + S3C64XX_SPI_CS_REG);
    334
    335			ssel |= (S3C64XX_SPI_CS_AUTO |
    336						S3C64XX_SPI_CS_NSC_CNT_2);
    337			writel(ssel, sdd->regs + S3C64XX_SPI_CS_REG);
    338		}
    339	} else {
    340		if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
    341			writel(S3C64XX_SPI_CS_SIG_INACT,
    342			       sdd->regs + S3C64XX_SPI_CS_REG);
    343	}
    344}
    345
    346static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
    347{
    348	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
    349
    350	if (is_polling(sdd))
    351		return 0;
    352
    353	spi->dma_rx = sdd->rx_dma.ch;
    354	spi->dma_tx = sdd->tx_dma.ch;
    355
    356	return 0;
    357}
    358
    359static bool s3c64xx_spi_can_dma(struct spi_master *master,
    360				struct spi_device *spi,
    361				struct spi_transfer *xfer)
    362{
    363	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
    364
    365	return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
    366}
    367
/*
 * s3c64xx_enable_datapath - program and start the RX/TX paths for @xfer
 * @sdd: controller runtime data.
 * @xfer: the transfer to start.
 * @dma_mode: non-zero to drive the FIFOs by DMA, zero for PIO.
 *
 * Sets up MODE_CFG, CH_CFG and the packet counter for the transfer and
 * either submits DMA descriptors via prepare_dma() or, for PIO tx,
 * writes the whole payload into the TX FIFO.  Called with sdd->lock
 * held from s3c64xx_spi_transfer_one().
 *
 * Returns 0 on success or a negative errno from prepare_dma().
 */
static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
				    struct spi_transfer *xfer, int dma_mode)
{
	void __iomem *regs = sdd->regs;
	u32 modecfg, chcfg;
	int ret = 0;

	/* Start from a clean slate: DMA requests and TX channel off. */
	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);

	chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
	chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;

	if (dma_mode) {
		chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
	} else {
		/* Always shift in data in FIFO, even if xfer is Tx only,
		 * this helps setting PCKT_CNT value for generating clocks
		 * as exactly needed.
		 */
		chcfg |= S3C64XX_SPI_CH_RXCH_ON;
		/* Packet count = number of cur_bpw-sized words, capped
		 * at the 16-bit field width. */
		writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
	}

	if (xfer->tx_buf != NULL) {
		sdd->state |= TXBUSY;
		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
			ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
		} else {
			/* PIO tx: push the payload into the FIFO in
			 * word-size units matching cur_bpw. */
			switch (sdd->cur_bpw) {
			case 32:
				iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 4);
				break;
			case 16:
				iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 2);
				break;
			default:
				iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len);
				break;
			}
		}
	}

	if (xfer->rx_buf != NULL) {
		sdd->state |= RXBUSY;

		/* High-speed RX only without CPHA — hardware limitation
		 * per the port_conf->high_speed capability flag. */
		if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
					&& !(sdd->cur_mode & SPI_CPHA))
			chcfg |= S3C64XX_SPI_CH_HS_EN;

		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
			chcfg |= S3C64XX_SPI_CH_RXCH_ON;
			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
			ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
		}
	}

	if (ret)
		return ret;

	/* Commit the configuration; this actually starts the channels. */
	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);

	return 0;
}
    443
/*
 * s3c64xx_spi_wait_for_timeout - busy-wait for the RX FIFO to fill
 * @sdd: controller runtime data.
 * @timeout_ms: rough poll budget in milliseconds; 0 means a single
 *	status read.
 *
 * Polls SPI_STATUS until the RX FIFO holds a full FIFO's worth of data
 * ((mask >> 1) + 1 entries) or the loop budget runs out.
 *
 * Returns the RX FIFO level actually observed, which may be less than
 * the full depth on timeout (e.g. the tail of a transfer).
 */
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
					int timeout_ms)
{
	void __iomem *regs = sdd->regs;
	unsigned long val = 1;
	u32 status;

	/* max fifo depth available */
	u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;

	if (timeout_ms)
		val = msecs_to_loops(timeout_ms);

	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);

	/* return the actual received data length */
	return RX_FIFO_LVL(status, sdd);
}
    464
/*
 * s3c64xx_wait_for_dma - wait for a DMA-driven transfer to finish
 * @sdd: controller runtime data.
 * @xfer: the in-flight transfer.
 *
 * Sleeps on sdd->xfer_completion (signalled by s3c64xx_spi_dmacb())
 * with a timeout derived from the transfer length and clock speed.
 * For tx-only transfers it additionally busy-waits until the TX FIFO
 * drains and TX_DONE is set, since DMA completion only means the data
 * reached the FIFO, not the bus.
 *
 * Returns 0 on success, -EIO on timeout.
 */
static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
				struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 30;               /* some tolerance */
	ms = max(ms, 100);      /* minimum timeout */

	val = msecs_to_jiffies(ms) + 10;
	val = wait_for_completion_timeout(&sdd->xfer_completion, val);

	/*
	 * If the previous xfer was completed within timeout, then
	 * proceed further else return -EIO.
	 * DmaTx returns after simply writing data in the FIFO,
	 * w/o waiting for real transmission on the bus to finish.
	 * DmaRx returns only after Dma read data from FIFO which
	 * needs bus transmission to finish, so we don't worry if
	 * Xfer involved Rx(with or without Tx).
	 */
	if (val && !xfer->rx_buf) {
		val = msecs_to_loops(10);
		status = readl(regs + S3C64XX_SPI_STATUS);
		while ((TX_FIFO_LVL(status, sdd)
			|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
		       && --val) {
			cpu_relax();
			status = readl(regs + S3C64XX_SPI_STATUS);
		}

	}

	/* If timed out while checking rx/tx status return error */
	if (!val)
		return -EIO;

	return 0;
}
    508
/*
 * s3c64xx_wait_for_pio - wait for and drain a PIO transfer
 * @sdd: controller runtime data.
 * @xfer: the in-flight transfer (len already capped to the FIFO size
 *	by s3c64xx_spi_transfer_one() on polling controllers).
 *
 * Busy-waits until the RX FIFO holds xfer->len entries (data is always
 * shifted into the RX FIFO in PIO mode, see s3c64xx_enable_datapath()),
 * then copies the data out in cur_bpw-sized units if the caller asked
 * for rx data.
 *
 * Returns 0 on success, -EIO if the FIFO never filled in time.
 */
static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
				struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int loops;
	u32 cpy_len;
	u8 *buf;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	val = msecs_to_loops(ms);
	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);

	if (!val)
		return -EIO;

	/* If it was only Tx */
	if (!xfer->rx_buf) {
		sdd->state &= ~TXBUSY;
		return 0;
	}

	/*
	 * If the receive length is bigger than the controller fifo
	 * size, calculate the loops and read the fifo as many times.
	 * loops = length / max fifo size (calculated by using the
	 * fifo mask).
	 * For any size less than the fifo size the below code is
	 * executed atleast once.
	 */
	loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
	buf = xfer->rx_buf;
	do {
		/* wait for data to be received in the fifo */
		cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
						       (loops ? ms : 0));

		/* Pop whatever arrived, in word-size units. */
		switch (sdd->cur_bpw) {
		case 32:
			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 4);
			break;
		case 16:
			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 2);
			break;
		default:
			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
				    buf, cpy_len);
			break;
		}

		buf = buf + cpy_len;
	} while (loops--);
	sdd->state &= ~RXBUSY;

	return 0;
}
    574
/*
 * s3c64xx_spi_config - apply cur_mode/cur_bpw/cur_speed to the hardware
 * @sdd: controller runtime data (cur_* fields already updated by the
 *	caller, s3c64xx_spi_transfer_one()).
 *
 * Disables the SPI clock, programs polarity/phase, bus and channel
 * word sizes, then sets the clock rate either through the external CMU
 * clock (clk_from_cmu) or the internal prescaler, re-enabling the
 * clock in the latter case.
 *
 * Returns 0 on success or the error from clk_set_rate().
 */
static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	int ret;
	u32 val;

	/* Disable Clock */
	if (!sdd->port_conf->clk_from_cmu) {
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	/* Set Polarity and Phase */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_SLAVE |
			S3C64XX_SPI_CPOL_L |
			S3C64XX_SPI_CPHA_B);

	if (sdd->cur_mode & SPI_CPOL)
		val |= S3C64XX_SPI_CPOL_L;

	if (sdd->cur_mode & SPI_CPHA)
		val |= S3C64XX_SPI_CPHA_B;

	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Set Channel & DMA Mode */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
			| S3C64XX_SPI_MODE_CH_TSZ_MASK);

	/* Bus and channel transfer size follow the bits-per-word. */
	switch (sdd->cur_bpw) {
	case 32:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
		break;
	case 16:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
		break;
	default:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
		val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
		break;
	}

	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	if (sdd->port_conf->clk_from_cmu) {
		/* The src_clk clock is divided internally by 2 */
		ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
		if (ret)
			return ret;
		/* Record the rate actually achieved. */
		sdd->cur_speed = clk_get_rate(sdd->src_clk) / 2;
	} else {
		/* Configure Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_PSR_MASK;
		val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
				& S3C64XX_SPI_PSR_MASK);
		writel(val, regs + S3C64XX_SPI_CLK_CFG);

		/* Enable Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val |= S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	return 0;
}
    646
    647#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
    648
    649static int s3c64xx_spi_prepare_message(struct spi_master *master,
    650				       struct spi_message *msg)
    651{
    652	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
    653	struct spi_device *spi = msg->spi;
    654	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
    655
    656	/* Configure feedback delay */
    657	if (!cs)
    658		/* No delay if not defined */
    659		writel(0, sdd->regs + S3C64XX_SPI_FB_CLK);
    660	else
    661		writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
    662
    663	return 0;
    664}
    665
/*
 * s3c64xx_spi_transfer_one - execute a single SPI transfer
 * @master: the SPI master.
 * @spi: the target device.
 * @xfer: the transfer to run.
 *
 * Reconfigures the controller if bits-per-word or speed changed, then
 * runs the transfer either by DMA (when channels exist and the length
 * exceeds the FIFO) or by PIO.  On polling-only controllers a transfer
 * larger than the FIFO is split into FIFO-sized chunks by temporarily
 * rewriting xfer->len and the buffer pointers; the originals are
 * restored before returning.  On DMA errors the channels are paused,
 * their residue reported, and terminated.
 *
 * Returns 0 on success or a negative errno.
 */
static int s3c64xx_spi_transfer_one(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	const unsigned int fifo_len = (FIFO_LVL_MASK(sdd) >> 1) + 1;
	const void *tx_buf = NULL;
	void *rx_buf = NULL;
	int target_len = 0, origin_len = 0;
	int use_dma = 0;
	int status;
	u32 speed;
	u8 bpw;
	unsigned long flags;

	reinit_completion(&sdd->xfer_completion);

	/* Only BPW and Speed may change across transfers */
	bpw = xfer->bits_per_word;
	speed = xfer->speed_hz;

	if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
		sdd->cur_bpw = bpw;
		sdd->cur_speed = speed;
		sdd->cur_mode = spi->mode;
		status = s3c64xx_spi_config(sdd);
		if (status)
			return status;
	}

	if (!is_polling(sdd) && (xfer->len > fifo_len) &&
	    sdd->rx_dma.ch && sdd->tx_dma.ch) {
		use_dma = 1;

	} else if (is_polling(sdd) && xfer->len > fifo_len) {
		/* Polling mode can't exceed the FIFO: chunk the
		 * transfer, saving the originals for restoration. */
		tx_buf = xfer->tx_buf;
		rx_buf = xfer->rx_buf;
		origin_len = xfer->len;

		target_len = xfer->len;
		if (xfer->len > fifo_len)
			xfer->len = fifo_len;
	}

	/* Runs once for DMA/small PIO, once per chunk when split. */
	do {
		spin_lock_irqsave(&sdd->lock, flags);

		/* Pending only which is to be done */
		sdd->state &= ~RXBUSY;
		sdd->state &= ~TXBUSY;

		/* Start the signals */
		s3c64xx_spi_set_cs(spi, true);

		status = s3c64xx_enable_datapath(sdd, xfer, use_dma);

		spin_unlock_irqrestore(&sdd->lock, flags);

		if (status) {
			dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
			break;
		}

		if (use_dma)
			status = s3c64xx_wait_for_dma(sdd, xfer);
		else
			status = s3c64xx_wait_for_pio(sdd, xfer);

		if (status) {
			dev_err(&spi->dev,
				"I/O Error: rx-%d tx-%d rx-%c tx-%c len-%d dma-%d res-(%d)\n",
				xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
				(sdd->state & RXBUSY) ? 'f' : 'p',
				(sdd->state & TXBUSY) ? 'f' : 'p',
				xfer->len, use_dma ? 1 : 0, status);

			if (use_dma) {
				struct dma_tx_state s;

				/* Pause before reading residue so the
				 * count is stable, then terminate. */
				if (xfer->tx_buf && (sdd->state & TXBUSY)) {
					dmaengine_pause(sdd->tx_dma.ch);
					dmaengine_tx_status(sdd->tx_dma.ch, sdd->tx_dma.cookie, &s);
					dmaengine_terminate_all(sdd->tx_dma.ch);
					dev_err(&spi->dev, "TX residue: %d\n", s.residue);

				}
				if (xfer->rx_buf && (sdd->state & RXBUSY)) {
					dmaengine_pause(sdd->rx_dma.ch);
					dmaengine_tx_status(sdd->rx_dma.ch, sdd->rx_dma.cookie, &s);
					dmaengine_terminate_all(sdd->rx_dma.ch);
					dev_err(&spi->dev, "RX residue: %d\n", s.residue);
				}
			}
		} else {
			s3c64xx_flush_fifo(sdd);
		}
		/* Advance to the next chunk of a split polling xfer. */
		if (target_len > 0) {
			target_len -= xfer->len;

			if (xfer->tx_buf)
				xfer->tx_buf += xfer->len;

			if (xfer->rx_buf)
				xfer->rx_buf += xfer->len;

			if (target_len > fifo_len)
				xfer->len = fifo_len;
			else
				xfer->len = target_len;
		}
	} while (target_len > 0);

	if (origin_len) {
		/* Restore original xfer buffers and length */
		xfer->tx_buf = tx_buf;
		xfer->rx_buf = rx_buf;
		xfer->len = origin_len;
	}

	return status;
}
    787
    788static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
    789				struct spi_device *spi)
    790{
    791	struct s3c64xx_spi_csinfo *cs;
    792	struct device_node *slave_np, *data_np = NULL;
    793	u32 fb_delay = 0;
    794
    795	slave_np = spi->dev.of_node;
    796	if (!slave_np) {
    797		dev_err(&spi->dev, "device node not found\n");
    798		return ERR_PTR(-EINVAL);
    799	}
    800
    801	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
    802	if (!cs)
    803		return ERR_PTR(-ENOMEM);
    804
    805	data_np = of_get_child_by_name(slave_np, "controller-data");
    806	if (!data_np) {
    807		dev_info(&spi->dev, "feedback delay set to default (0)\n");
    808		return cs;
    809	}
    810
    811	of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
    812	cs->fb_delay = fb_delay;
    813	of_node_put(data_np);
    814	return cs;
    815}
    816
    817/*
    818 * Here we only check the validity of requested configuration
    819 * and save the configuration in a local data-structure.
    820 * The controller is actually configured only just before we
    821 * get a message to transfer.
    822 */
/*
 * s3c64xx_spi_setup - validate a device's requested configuration
 * @spi: the slave device being set up.
 *
 * On DT systems the controller_data is (re)built from the device node.
 * For controllers with an internal prescaler, clamps and rounds
 * spi->max_speed_hz to the nearest achievable rate; CMU-clocked
 * controllers are rate-checked later in s3c64xx_spi_config().
 * Returns with the device deselected.
 *
 * Returns 0 on success, -ENODEV for bad controller data, -EINVAL when
 * the requested rate cannot be provided.
 */
static int s3c64xx_spi_setup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct s3c64xx_spi_driver_data *sdd;
	int err;

	sdd = spi_master_get_devdata(spi->master);
	if (spi->dev.of_node) {
		cs = s3c64xx_get_slave_ctrldata(spi);
		spi->controller_data = cs;
	}

	/* NULL is fine, we just avoid using the FB delay (=0) */
	if (IS_ERR(cs)) {
		dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
		return -ENODEV;
	}

	if (!spi_get_ctldata(spi))
		spi_set_ctldata(spi, cs);

	/* Keep the controller powered while we poke the clocks. */
	pm_runtime_get_sync(&sdd->pdev->dev);

	/* Check if we can provide the requested rate */
	if (!sdd->port_conf->clk_from_cmu) {
		u32 psr, speed;

		/* Max possible */
		speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);

		if (spi->max_speed_hz > speed)
			spi->max_speed_hz = speed;

		/* First prescaler guess: rate = src / 2 / (psr + 1). */
		psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
		psr &= S3C64XX_SPI_PSR_MASK;
		if (psr == S3C64XX_SPI_PSR_MASK)
			psr--;

		/* If we overshot the request, step the divider up once. */
		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz < speed) {
			if (psr+1 < S3C64XX_SPI_PSR_MASK) {
				psr++;
			} else {
				err = -EINVAL;
				goto setup_exit;
			}
		}

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz >= speed) {
			/* Round the request down to the achieved rate. */
			spi->max_speed_hz = speed;
		} else {
			dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
				spi->max_speed_hz);
			err = -EINVAL;
			goto setup_exit;
		}
	}

	pm_runtime_mark_last_busy(&sdd->pdev->dev);
	pm_runtime_put_autosuspend(&sdd->pdev->dev);
	s3c64xx_spi_set_cs(spi, false);

	return 0;

setup_exit:
	pm_runtime_mark_last_busy(&sdd->pdev->dev);
	pm_runtime_put_autosuspend(&sdd->pdev->dev);
	/* setup() returns with device de-selected */
	s3c64xx_spi_set_cs(spi, false);

	spi_set_ctldata(spi, NULL);

	/* This was dynamically allocated on the DT path */
	if (spi->dev.of_node)
		kfree(cs);

	return err;
}
    902
    903static void s3c64xx_spi_cleanup(struct spi_device *spi)
    904{
    905	struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
    906
    907	/* This was dynamically allocated on the DT path */
    908	if (spi->dev.of_node)
    909		kfree(cs);
    910
    911	spi_set_ctldata(spi, NULL);
    912}
    913
/*
 * s3c64xx_spi_irq - FIFO error interrupt handler
 * @irq: interrupt number (unused).
 * @data: the controller's &struct s3c64xx_spi_driver_data.
 *
 * Logs any RX/TX over/underrun flagged in SPI_STATUS and acknowledges
 * them; the hardware requires the pending-clear bits to be written set
 * and then written back to zero.  Data movement itself is handled by
 * DMA callbacks or polling, so this only reports errors.
 */
static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
{
	struct s3c64xx_spi_driver_data *sdd = data;
	struct spi_master *spi = sdd->master;
	unsigned int val, clr = 0;

	val = readl(sdd->regs + S3C64XX_SPI_STATUS);

	if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
		clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
		dev_err(&spi->dev, "RX overrun\n");
	}
	if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
		dev_err(&spi->dev, "RX underrun\n");
	}
	if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
		dev_err(&spi->dev, "TX overrun\n");
	}
	if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
		dev_err(&spi->dev, "TX underrun\n");
	}

	/* Clear the pending irq by setting and then clearing it */
	writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);

	return IRQ_HANDLED;
}
    945
    946static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd)
    947{
    948	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
    949	void __iomem *regs = sdd->regs;
    950	unsigned int val;
    951
    952	sdd->cur_speed = 0;
    953
    954	if (sci->no_cs)
    955		writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
    956	else if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
    957		writel(S3C64XX_SPI_CS_SIG_INACT, sdd->regs + S3C64XX_SPI_CS_REG);
    958
    959	/* Disable Interrupts - we use Polling if not DMA mode */
    960	writel(0, regs + S3C64XX_SPI_INT_EN);
    961
    962	if (!sdd->port_conf->clk_from_cmu)
    963		writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
    964				regs + S3C64XX_SPI_CLK_CFG);
    965	writel(0, regs + S3C64XX_SPI_MODE_CFG);
    966	writel(0, regs + S3C64XX_SPI_PACKET_CNT);
    967
    968	/* Clear any irq pending bits, should set and clear the bits */
    969	val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
    970		S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
    971		S3C64XX_SPI_PND_TX_OVERRUN_CLR |
    972		S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
    973	writel(val, regs + S3C64XX_SPI_PENDING_CLR);
    974	writel(0, regs + S3C64XX_SPI_PENDING_CLR);
    975
    976	writel(0, regs + S3C64XX_SPI_SWAP_CFG);
    977
    978	val = readl(regs + S3C64XX_SPI_MODE_CFG);
    979	val &= ~S3C64XX_SPI_MODE_4BURST;
    980	val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
    981	val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
    982	writel(val, regs + S3C64XX_SPI_MODE_CFG);
    983
    984	s3c64xx_flush_fifo(sdd);
    985}
    986
    987#ifdef CONFIG_OF
    988static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
    989{
    990	struct s3c64xx_spi_info *sci;
    991	u32 temp;
    992
    993	sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
    994	if (!sci)
    995		return ERR_PTR(-ENOMEM);
    996
    997	if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
    998		dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
    999		sci->src_clk_nr = 0;
   1000	} else {
   1001		sci->src_clk_nr = temp;
   1002	}
   1003
   1004	if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
   1005		dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
   1006		sci->num_cs = 1;
   1007	} else {
   1008		sci->num_cs = temp;
   1009	}
   1010
   1011	sci->no_cs = of_property_read_bool(dev->of_node, "no-cs-readback");
   1012
   1013	return sci;
   1014}
   1015#else
   1016static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
   1017{
   1018	return dev_get_platdata(dev);
   1019}
   1020#endif
   1021
   1022static inline const struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
   1023						struct platform_device *pdev)
   1024{
   1025#ifdef CONFIG_OF
   1026	if (pdev->dev.of_node)
   1027		return of_device_get_match_data(&pdev->dev);
   1028#endif
   1029	return (const struct s3c64xx_spi_port_config *)platform_get_device_id(pdev)->driver_data;
   1030}
   1031
/*
 * s3c64xx_spi_probe - bind the driver to one SPI controller instance
 *
 * Collects configuration (board platform data or device tree), maps the
 * register window, acquires clocks and — unless the port is poll-only —
 * DMA channels, resets the hardware and registers an SPI master.  On
 * success the device is left runtime-PM managed with autosuspend.
 */
static int s3c64xx_spi_probe(struct platform_device *pdev)
{
	struct resource	*mem_res;
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
	struct spi_master *master;
	int ret, irq;
	char clk_name[16];

	/* On the DT path the platform data is synthesised from properties */
	if (!sci && pdev->dev.of_node) {
		sci = s3c64xx_spi_parse_dt(&pdev->dev);
		if (IS_ERR(sci))
			return PTR_ERR(sci);
	}

	if (!sci) {
		dev_err(&pdev->dev, "platform_data missing!\n");
		return -ENODEV;
	}

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (mem_res == NULL) {
		dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
		return -ENXIO;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
		return irq;
	}

	master = spi_alloc_master(&pdev->dev,
				sizeof(struct s3c64xx_spi_driver_data));
	if (master == NULL) {
		dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, master);

	sdd = spi_master_get_devdata(master);
	sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
	sdd->master = master;
	sdd->cntrlr_info = sci;
	sdd->pdev = pdev;
	sdd->sfr_start = mem_res->start;
	/* Bus number: the DT "spi" alias when probed from DT, else pdev->id */
	if (pdev->dev.of_node) {
		ret = of_alias_get_id(pdev->dev.of_node, "spi");
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
				ret);
			goto err_deref_master;
		}
		sdd->port_id = ret;
	} else {
		sdd->port_id = pdev->id;
	}

	sdd->cur_bpw = 8;

	sdd->tx_dma.direction = DMA_MEM_TO_DEV;
	sdd->rx_dma.direction = DMA_DEV_TO_MEM;

	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = sdd->port_id;
	master->setup = s3c64xx_spi_setup;
	master->cleanup = s3c64xx_spi_cleanup;
	master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
	master->prepare_message = s3c64xx_spi_prepare_message;
	master->transfer_one = s3c64xx_spi_transfer_one;
	master->num_chipselect = sci->num_cs;
	master->use_gpio_descriptors = true;
	master->dma_alignment = 8;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
					SPI_BPW_MASK(8);
	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->auto_runtime_pm = true;
	if (!is_polling(sdd))
		master->can_dma = s3c64xx_spi_can_dma;

	sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(sdd->regs)) {
		ret = PTR_ERR(sdd->regs);
		goto err_deref_master;
	}

	/* Optional board-file hook for pin muxing; non-zero means failure */
	if (sci->cfg_gpio && sci->cfg_gpio()) {
		dev_err(&pdev->dev, "Unable to config gpio\n");
		ret = -EBUSY;
		goto err_deref_master;
	}

	/* Setup clocks */
	sdd->clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(sdd->clk)) {
		dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
		ret = PTR_ERR(sdd->clk);
		goto err_deref_master;
	}

	ret = clk_prepare_enable(sdd->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
		goto err_deref_master;
	}

	/* The bus clock source is selected by platform data / DT */
	sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
	sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
	if (IS_ERR(sdd->src_clk)) {
		dev_err(&pdev->dev,
			"Unable to acquire clock '%s'\n", clk_name);
		ret = PTR_ERR(sdd->src_clk);
		goto err_disable_clk;
	}

	ret = clk_prepare_enable(sdd->src_clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
		goto err_disable_clk;
	}

	/* Some SoCs (e.g. Exynos5433) need an extra I/O clock */
	if (sdd->port_conf->clk_ioclk) {
		sdd->ioclk = devm_clk_get(&pdev->dev, "spi_ioclk");
		if (IS_ERR(sdd->ioclk)) {
			dev_err(&pdev->dev, "Unable to acquire 'ioclk'\n");
			ret = PTR_ERR(sdd->ioclk);
			goto err_disable_src_clk;
		}

		ret = clk_prepare_enable(sdd->ioclk);
		if (ret) {
			dev_err(&pdev->dev, "Couldn't enable clock 'ioclk'\n");
			goto err_disable_src_clk;
		}
	}

	if (!is_polling(sdd)) {
		/* Acquire DMA channels */
		sdd->rx_dma.ch = dma_request_chan(&pdev->dev, "rx");
		if (IS_ERR(sdd->rx_dma.ch)) {
			dev_err(&pdev->dev, "Failed to get RX DMA channel\n");
			ret = PTR_ERR(sdd->rx_dma.ch);
			goto err_disable_io_clk;
		}
		sdd->tx_dma.ch = dma_request_chan(&pdev->dev, "tx");
		if (IS_ERR(sdd->tx_dma.ch)) {
			dev_err(&pdev->dev, "Failed to get TX DMA channel\n");
			ret = PTR_ERR(sdd->tx_dma.ch);
			goto err_release_rx_dma;
		}
	}

	/* Mark active before enabling runtime PM so the first get is a no-op */
	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* Set up the default hardware configuration */
	s3c64xx_spi_hwinit(sdd);

	spin_lock_init(&sdd->lock);
	init_completion(&sdd->xfer_completion);

	ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
				"spi-s3c64xx", sdd);
	if (ret != 0) {
		dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
			irq, ret);
		goto err_pm_put;
	}

	/* Enable interrupts only for FIFO error conditions */
	writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
	       S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
	       sdd->regs + S3C64XX_SPI_INT_EN);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret != 0) {
		dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
		goto err_pm_put;
	}

	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
					sdd->port_id, master->num_chipselect);
	dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
					mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

	/* Unwind in reverse acquisition order */
err_pm_put:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	if (!is_polling(sdd))
		dma_release_channel(sdd->tx_dma.ch);
err_release_rx_dma:
	if (!is_polling(sdd))
		dma_release_channel(sdd->rx_dma.ch);
err_disable_io_clk:
	clk_disable_unprepare(sdd->ioclk);
err_disable_src_clk:
	clk_disable_unprepare(sdd->src_clk);
err_disable_clk:
	clk_disable_unprepare(sdd->clk);
err_deref_master:
	spi_master_put(master);

	return ret;
}
   1247
/*
 * s3c64xx_spi_remove - unbind teardown, reverse of probe
 *
 * Wakes the device so the register write below reaches live hardware,
 * masks interrupts, releases DMA channels and gates all clocks, then
 * drops runtime PM state.  The devm-managed resources (regs mapping,
 * IRQ, master registration) are released by the core after this returns.
 */
static int s3c64xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	pm_runtime_get_sync(&pdev->dev);

	/* Mask all controller interrupts */
	writel(0, sdd->regs + S3C64XX_SPI_INT_EN);

	if (!is_polling(sdd)) {
		dma_release_channel(sdd->rx_dma.ch);
		dma_release_channel(sdd->tx_dma.ch);
	}

	/*
	 * NOTE(review): the master was registered via devm, so it is only
	 * unregistered after remove() returns — confirm no transfer can
	 * still be queued once the clocks below are gated.
	 */
	clk_disable_unprepare(sdd->ioclk);

	clk_disable_unprepare(sdd->src_clk);

	clk_disable_unprepare(sdd->clk);

	/* Balance the get_sync above, then tear down runtime PM */
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	return 0;
}
   1274
   1275#ifdef CONFIG_PM_SLEEP
   1276static int s3c64xx_spi_suspend(struct device *dev)
   1277{
   1278	struct spi_master *master = dev_get_drvdata(dev);
   1279	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
   1280
   1281	int ret = spi_master_suspend(master);
   1282	if (ret)
   1283		return ret;
   1284
   1285	ret = pm_runtime_force_suspend(dev);
   1286	if (ret < 0)
   1287		return ret;
   1288
   1289	sdd->cur_speed = 0; /* Output Clock is stopped */
   1290
   1291	return 0;
   1292}
   1293
   1294static int s3c64xx_spi_resume(struct device *dev)
   1295{
   1296	struct spi_master *master = dev_get_drvdata(dev);
   1297	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
   1298	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
   1299	int ret;
   1300
   1301	if (sci->cfg_gpio)
   1302		sci->cfg_gpio();
   1303
   1304	ret = pm_runtime_force_resume(dev);
   1305	if (ret < 0)
   1306		return ret;
   1307
   1308	return spi_master_resume(master);
   1309}
   1310#endif /* CONFIG_PM_SLEEP */
   1311
   1312#ifdef CONFIG_PM
/*
 * Runtime PM suspend: gate all clocks feeding the controller, in the
 * reverse order of s3c64xx_spi_runtime_resume().  sdd->ioclk is only
 * acquired when port_conf->clk_ioclk is set; otherwise it is NULL and
 * clk_disable_unprepare() treats that as a no-op.
 */
static int s3c64xx_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	clk_disable_unprepare(sdd->clk);
	clk_disable_unprepare(sdd->src_clk);
	clk_disable_unprepare(sdd->ioclk);

	return 0;
}
   1324
   1325static int s3c64xx_spi_runtime_resume(struct device *dev)
   1326{
   1327	struct spi_master *master = dev_get_drvdata(dev);
   1328	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
   1329	int ret;
   1330
   1331	if (sdd->port_conf->clk_ioclk) {
   1332		ret = clk_prepare_enable(sdd->ioclk);
   1333		if (ret != 0)
   1334			return ret;
   1335	}
   1336
   1337	ret = clk_prepare_enable(sdd->src_clk);
   1338	if (ret != 0)
   1339		goto err_disable_ioclk;
   1340
   1341	ret = clk_prepare_enable(sdd->clk);
   1342	if (ret != 0)
   1343		goto err_disable_src_clk;
   1344
   1345	s3c64xx_spi_hwinit(sdd);
   1346
   1347	writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
   1348	       S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
   1349	       sdd->regs + S3C64XX_SPI_INT_EN);
   1350
   1351	return 0;
   1352
   1353err_disable_src_clk:
   1354	clk_disable_unprepare(sdd->src_clk);
   1355err_disable_ioclk:
   1356	clk_disable_unprepare(sdd->ioclk);
   1357
   1358	return ret;
   1359}
   1360#endif /* CONFIG_PM */
   1361
/* System-sleep and runtime PM callbacks for this driver */
static const struct dev_pm_ops s3c64xx_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
	SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
			   s3c64xx_spi_runtime_resume, NULL)
};
   1367
/*
 * Per-SoC port parameters: FIFO level mask per port (encodes FIFO depth),
 * bit positions of the RX-level and TX-done fields in the status register,
 * and feature flags (high-speed mode, CMU-sourced clocking, CS quirks).
 */
static const struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
	.high_speed	= true,
};

static const struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7F },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
};

static const struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
};

static const struct s3c64xx_spi_port_config exynos4_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};

static const struct s3c64xx_spi_port_config exynos7_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};

/* Exynos5433 additionally needs the dedicated I/O clock (clk_ioclk) */
static const struct s3c64xx_spi_port_config exynos5433_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff},
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.clk_ioclk	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};
   1415
   1416static struct s3c64xx_spi_port_config fsd_spi_port_config = {
   1417	.fifo_lvl_mask	= { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f},
   1418	.rx_lvl_offset	= 15,
   1419	.tx_st_done	= 25,
   1420	.high_speed	= true,
   1421	.clk_from_cmu	= true,
   1422	.clk_ioclk	= false,
   1423	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
   1424};
   1425
/* Legacy (non-DT) platform-device IDs mapped to their port configs */
static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
	{
		.name		= "s3c2443-spi",
		.driver_data	= (kernel_ulong_t)&s3c2443_spi_port_config,
	}, {
		.name		= "s3c6410-spi",
		.driver_data	= (kernel_ulong_t)&s3c6410_spi_port_config,
	},
	{ },
};
   1436
/* Device-tree compatibles mapped to their per-SoC port configs */
static const struct of_device_id s3c64xx_spi_dt_match[] = {
	{ .compatible = "samsung,s3c2443-spi",
			.data = (void *)&s3c2443_spi_port_config,
	},
	{ .compatible = "samsung,s3c6410-spi",
			.data = (void *)&s3c6410_spi_port_config,
	},
	{ .compatible = "samsung,s5pv210-spi",
			.data = (void *)&s5pv210_spi_port_config,
	},
	{ .compatible = "samsung,exynos4210-spi",
			.data = (void *)&exynos4_spi_port_config,
	},
	{ .compatible = "samsung,exynos7-spi",
			.data = (void *)&exynos7_spi_port_config,
	},
	{ .compatible = "samsung,exynos5433-spi",
			.data = (void *)&exynos5433_spi_port_config,
	},
	{ .compatible = "tesla,fsd-spi",
			.data = (void *)&fsd_spi_port_config,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
   1462
/* Platform driver glue: probes via DT match table or legacy device IDs */
static struct platform_driver s3c64xx_spi_driver = {
	.driver = {
		.name	= "s3c64xx-spi",
		.pm = &s3c64xx_spi_pm,
		.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
	},
	.probe = s3c64xx_spi_probe,
	.remove = s3c64xx_spi_remove,
	.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");

module_platform_driver(s3c64xx_spi_driver);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
MODULE_LICENSE("GPL");