cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sh_mmcif.c (41889B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * MMCIF eMMC driver.
      4 *
      5 * Copyright (C) 2010 Renesas Solutions Corp.
      6 * Yusuke Goda <yusuke.goda.sx@renesas.com>
      7 */
      8
      9/*
      10 * The MMCIF driver processes MMC requests asynchronously, as required by
      11 * the Linux MMC API.
     12 *
     13 * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
     14 * data, and optional stop. To achieve asynchronous processing each of these
     15 * stages is split into two halves: a top and a bottom half. The top half
     16 * initialises the hardware, installs a timeout handler to handle completion
     17 * timeouts, and returns. In case of the command stage this immediately returns
     18 * control to the caller, leaving all further processing to run asynchronously.
     19 * All further request processing is performed by the bottom halves.
     20 *
     21 * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
     22 * thread, a DMA completion callback, if DMA is used, a timeout work, and
     23 * request- and stage-specific handler methods.
     24 *
     25 * Each bottom half run begins with either a hardware interrupt, a DMA callback
     26 * invocation, or a timeout work run. In case of an error or a successful
     27 * processing completion, the MMC core is informed and the request processing is
     28 * finished. In case processing has to continue, i.e., if data has to be read
     29 * from or written to the card, or if a stop command has to be sent, the next
     30 * top half is called, which performs the necessary hardware handling and
     31 * reschedules the timeout work. This returns the driver state machine into the
     32 * bottom half waiting state.
     33 */
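
        /*
         * Illustrative call flow for a PIO multi-block read, pieced together
         * from the handlers below (a sketch of the state machine described
         * above, not an additional code path):
         *
         *   sh_mmcif_request() -> sh_mmcif_start_cmd()                 (top half)
         *   IRQ: sh_mmcif_intr() -> sh_mmcif_irqt() -> sh_mmcif_end_cmd()
         *                        -> sh_mmcif_multi_read()              (next top half)
         *   IRQ: ... -> sh_mmcif_mread_block()      (repeated per block)
         *   IRQ: ... -> sh_mmcif_stop_cmd()         (if a stop command is needed)
         *   IRQ: ... -> mmc_request_done()
         */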
     34
     35#include <linux/bitops.h>
     36#include <linux/clk.h>
     37#include <linux/completion.h>
     38#include <linux/delay.h>
     39#include <linux/dma-mapping.h>
     40#include <linux/dmaengine.h>
     41#include <linux/mmc/card.h>
     42#include <linux/mmc/core.h>
     43#include <linux/mmc/host.h>
     44#include <linux/mmc/mmc.h>
     45#include <linux/mmc/sdio.h>
     46#include <linux/mmc/slot-gpio.h>
     47#include <linux/mod_devicetable.h>
     48#include <linux/mutex.h>
     49#include <linux/of_device.h>
     50#include <linux/pagemap.h>
     51#include <linux/platform_data/sh_mmcif.h>
     52#include <linux/platform_device.h>
     53#include <linux/pm_qos.h>
     54#include <linux/pm_runtime.h>
     55#include <linux/sh_dma.h>
     56#include <linux/spinlock.h>
     57#include <linux/module.h>
     58
     59#define DRIVER_NAME	"sh_mmcif"
     60
     61/* CE_CMD_SET */
     62#define CMD_MASK		0x3f000000
     63#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
     64#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
     65#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
     66#define CMD_SET_RBSY		(1 << 21) /* R1b */
     67#define CMD_SET_CCSEN		(1 << 20)
     68#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
     69#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
     70#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
     71#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
     72#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
     73#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
     74#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
      75#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
      76#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
      77#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
      78#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
      79#define CMD_SET_CRCSTE		(1 << 8) /* 1: not receive CRC status */
      80#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
     81#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
     82#define CMD_SET_CCSH		(1 << 5)
     83#define CMD_SET_DARS		(1 << 2) /* Dual Data Rate */
     84#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
     85#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
     86#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */
     87
     88/* CE_CMD_CTRL */
     89#define CMD_CTRL_BREAK		(1 << 0)
     90
     91/* CE_BLOCK_SET */
     92#define BLOCK_SIZE_MASK		0x0000ffff
     93
     94/* CE_INT */
     95#define INT_CCSDE		(1 << 29)
     96#define INT_CMD12DRE		(1 << 26)
     97#define INT_CMD12RBE		(1 << 25)
     98#define INT_CMD12CRE		(1 << 24)
     99#define INT_DTRANE		(1 << 23)
    100#define INT_BUFRE		(1 << 22)
    101#define INT_BUFWEN		(1 << 21)
    102#define INT_BUFREN		(1 << 20)
    103#define INT_CCSRCV		(1 << 19)
    104#define INT_RBSYE		(1 << 17)
    105#define INT_CRSPE		(1 << 16)
    106#define INT_CMDVIO		(1 << 15)
    107#define INT_BUFVIO		(1 << 14)
    108#define INT_WDATERR		(1 << 11)
    109#define INT_RDATERR		(1 << 10)
    110#define INT_RIDXERR		(1 << 9)
    111#define INT_RSPERR		(1 << 8)
    112#define INT_CCSTO		(1 << 5)
    113#define INT_CRCSTO		(1 << 4)
    114#define INT_WDATTO		(1 << 3)
    115#define INT_RDATTO		(1 << 2)
    116#define INT_RBSYTO		(1 << 1)
    117#define INT_RSPTO		(1 << 0)
    118#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
    119				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
    120				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
    121				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)
    122
    123#define INT_ALL			(INT_RBSYE | INT_CRSPE | INT_BUFREN |	 \
    124				 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
    125				 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)
    126
    127#define INT_CCS			(INT_CCSTO | INT_CCSRCV | INT_CCSDE)
    128
    129/* CE_INT_MASK */
    130#define MASK_ALL		0x00000000
    131#define MASK_MCCSDE		(1 << 29)
    132#define MASK_MCMD12DRE		(1 << 26)
    133#define MASK_MCMD12RBE		(1 << 25)
    134#define MASK_MCMD12CRE		(1 << 24)
    135#define MASK_MDTRANE		(1 << 23)
    136#define MASK_MBUFRE		(1 << 22)
    137#define MASK_MBUFWEN		(1 << 21)
    138#define MASK_MBUFREN		(1 << 20)
    139#define MASK_MCCSRCV		(1 << 19)
    140#define MASK_MRBSYE		(1 << 17)
    141#define MASK_MCRSPE		(1 << 16)
    142#define MASK_MCMDVIO		(1 << 15)
    143#define MASK_MBUFVIO		(1 << 14)
    144#define MASK_MWDATERR		(1 << 11)
    145#define MASK_MRDATERR		(1 << 10)
    146#define MASK_MRIDXERR		(1 << 9)
    147#define MASK_MRSPERR		(1 << 8)
    148#define MASK_MCCSTO		(1 << 5)
    149#define MASK_MCRCSTO		(1 << 4)
    150#define MASK_MWDATTO		(1 << 3)
    151#define MASK_MRDATTO		(1 << 2)
    152#define MASK_MRBSYTO		(1 << 1)
    153#define MASK_MRSPTO		(1 << 0)
    154
    155#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
    156				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
    157				 MASK_MCRCSTO | MASK_MWDATTO | \
    158				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
    159
    160#define MASK_CLEAN		(INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |	\
    161				 MASK_MBUFREN | MASK_MBUFWEN |			\
    162				 MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE |	\
    163				 MASK_MCMD12RBE | MASK_MCMD12CRE)
    164
    165/* CE_HOST_STS1 */
    166#define STS1_CMDSEQ		(1 << 31)
    167
    168/* CE_HOST_STS2 */
    169#define STS2_CRCSTE		(1 << 31)
    170#define STS2_CRC16E		(1 << 30)
    171#define STS2_AC12CRCE		(1 << 29)
    172#define STS2_RSPCRC7E		(1 << 28)
    173#define STS2_CRCSTEBE		(1 << 27)
    174#define STS2_RDATEBE		(1 << 26)
    175#define STS2_AC12REBE		(1 << 25)
    176#define STS2_RSPEBE		(1 << 24)
    177#define STS2_AC12IDXE		(1 << 23)
    178#define STS2_RSPIDXE		(1 << 22)
    179#define STS2_CCSTO		(1 << 15)
    180#define STS2_RDATTO		(1 << 14)
    181#define STS2_DATBSYTO		(1 << 13)
    182#define STS2_CRCSTTO		(1 << 12)
    183#define STS2_AC12BSYTO		(1 << 11)
    184#define STS2_RSPBSYTO		(1 << 10)
    185#define STS2_AC12RSPTO		(1 << 9)
    186#define STS2_RSPTO		(1 << 8)
    187#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
    188				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
    189#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
    190				 STS2_DATBSYTO | STS2_CRCSTTO |		\
    191				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
    192				 STS2_AC12RSPTO | STS2_RSPTO)
    193
    194#define CLKDEV_EMMC_DATA	52000000 /* 52 MHz */
    195#define CLKDEV_MMC_DATA		20000000 /* 20 MHz */
    196#define CLKDEV_INIT		400000   /* 400 kHz */
    197
    198enum sh_mmcif_state {
    199	STATE_IDLE,
    200	STATE_REQUEST,
    201	STATE_IOS,
    202	STATE_TIMEOUT,
    203};
    204
    205enum sh_mmcif_wait_for {
    206	MMCIF_WAIT_FOR_REQUEST,
    207	MMCIF_WAIT_FOR_CMD,
    208	MMCIF_WAIT_FOR_MREAD,
    209	MMCIF_WAIT_FOR_MWRITE,
    210	MMCIF_WAIT_FOR_READ,
    211	MMCIF_WAIT_FOR_WRITE,
    212	MMCIF_WAIT_FOR_READ_END,
    213	MMCIF_WAIT_FOR_WRITE_END,
    214	MMCIF_WAIT_FOR_STOP,
    215};
    216
     217/*
     218 * Host controller state; some of the fields below differ per SoC variant.
     219 */
    220struct sh_mmcif_host {
    221	struct mmc_host *mmc;
    222	struct mmc_request *mrq;
    223	struct platform_device *pd;
    224	struct clk *clk;
    225	int bus_width;
    226	unsigned char timing;
    227	bool sd_error;
    228	bool dying;
    229	long timeout;
    230	void __iomem *addr;
    231	u32 *pio_ptr;
    232	spinlock_t lock;		/* protect sh_mmcif_host::state */
    233	enum sh_mmcif_state state;
    234	enum sh_mmcif_wait_for wait_for;
    235	struct delayed_work timeout_work;
    236	size_t blocksize;
    237	int sg_idx;
    238	int sg_blkidx;
    239	bool power;
    240	bool ccs_enable;		/* Command Completion Signal support */
    241	bool clk_ctrl2_enable;
    242	struct mutex thread_lock;
    243	u32 clkdiv_map;         /* see CE_CLK_CTRL::CLKDIV */
    244
    245	/* DMA support */
    246	struct dma_chan		*chan_rx;
    247	struct dma_chan		*chan_tx;
    248	struct completion	dma_complete;
    249	bool			dma_active;
    250};
    251
    252static const struct of_device_id sh_mmcif_of_match[] = {
    253	{ .compatible = "renesas,sh-mmcif" },
    254	{ }
    255};
    256MODULE_DEVICE_TABLE(of, sh_mmcif_of_match);
    257
    258#define sh_mmcif_host_to_dev(host) (&host->pd->dev)
    259
    260static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
    261					unsigned int reg, u32 val)
    262{
    263	writel(val | readl(host->addr + reg), host->addr + reg);
    264}
    265
    266static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
    267					unsigned int reg, u32 val)
    268{
    269	writel(~val & readl(host->addr + reg), host->addr + reg);
    270}
    271
    272static void sh_mmcif_dma_complete(void *arg)
    273{
    274	struct sh_mmcif_host *host = arg;
    275	struct mmc_request *mrq = host->mrq;
    276	struct device *dev = sh_mmcif_host_to_dev(host);
    277
    278	dev_dbg(dev, "Command completed\n");
    279
    280	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
    281		 dev_name(dev)))
    282		return;
    283
    284	complete(&host->dma_complete);
    285}
    286
    287static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
    288{
    289	struct mmc_data *data = host->mrq->data;
    290	struct scatterlist *sg = data->sg;
    291	struct dma_async_tx_descriptor *desc = NULL;
    292	struct dma_chan *chan = host->chan_rx;
    293	struct device *dev = sh_mmcif_host_to_dev(host);
    294	dma_cookie_t cookie = -EINVAL;
    295	int ret;
    296
    297	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
    298			 DMA_FROM_DEVICE);
    299	if (ret > 0) {
    300		host->dma_active = true;
    301		desc = dmaengine_prep_slave_sg(chan, sg, ret,
    302			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    303	}
    304
    305	if (desc) {
    306		desc->callback = sh_mmcif_dma_complete;
    307		desc->callback_param = host;
    308		cookie = dmaengine_submit(desc);
    309		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
    310		dma_async_issue_pending(chan);
    311	}
    312	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
    313		__func__, data->sg_len, ret, cookie);
    314
    315	if (!desc) {
    316		/* DMA failed, fall back to PIO */
    317		if (ret >= 0)
    318			ret = -EIO;
    319		host->chan_rx = NULL;
    320		host->dma_active = false;
    321		dma_release_channel(chan);
    322		/* Free the Tx channel too */
    323		chan = host->chan_tx;
    324		if (chan) {
    325			host->chan_tx = NULL;
    326			dma_release_channel(chan);
    327		}
    328		dev_warn(dev,
    329			 "DMA failed: %d, falling back to PIO\n", ret);
    330		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
    331	}
    332
    333	dev_dbg(dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
    334		desc, cookie, data->sg_len);
    335}
    336
    337static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
    338{
    339	struct mmc_data *data = host->mrq->data;
    340	struct scatterlist *sg = data->sg;
    341	struct dma_async_tx_descriptor *desc = NULL;
    342	struct dma_chan *chan = host->chan_tx;
    343	struct device *dev = sh_mmcif_host_to_dev(host);
    344	dma_cookie_t cookie = -EINVAL;
    345	int ret;
    346
    347	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
    348			 DMA_TO_DEVICE);
    349	if (ret > 0) {
    350		host->dma_active = true;
    351		desc = dmaengine_prep_slave_sg(chan, sg, ret,
    352			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    353	}
    354
    355	if (desc) {
    356		desc->callback = sh_mmcif_dma_complete;
    357		desc->callback_param = host;
    358		cookie = dmaengine_submit(desc);
    359		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
    360		dma_async_issue_pending(chan);
    361	}
    362	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
    363		__func__, data->sg_len, ret, cookie);
    364
    365	if (!desc) {
    366		/* DMA failed, fall back to PIO */
    367		if (ret >= 0)
    368			ret = -EIO;
    369		host->chan_tx = NULL;
    370		host->dma_active = false;
    371		dma_release_channel(chan);
    372		/* Free the Rx channel too */
    373		chan = host->chan_rx;
    374		if (chan) {
    375			host->chan_rx = NULL;
    376			dma_release_channel(chan);
    377		}
    378		dev_warn(dev,
    379			 "DMA failed: %d, falling back to PIO\n", ret);
    380		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
    381	}
    382
    383	dev_dbg(dev, "%s(): desc %p, cookie %d\n", __func__,
    384		desc, cookie);
    385}
    386
    387static struct dma_chan *
    388sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id)
    389{
    390	dma_cap_mask_t mask;
    391
    392	dma_cap_zero(mask);
    393	dma_cap_set(DMA_SLAVE, mask);
    394	if (slave_id <= 0)
    395		return NULL;
    396
    397	return dma_request_channel(mask, shdma_chan_filter, (void *)slave_id);
    398}
    399
    400static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
    401				     struct dma_chan *chan,
    402				     enum dma_transfer_direction direction)
    403{
    404	struct resource *res;
    405	struct dma_slave_config cfg = { 0, };
    406
    407	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
    408	if (!res)
    409		return -EINVAL;
    410
    411	cfg.direction = direction;
    412
    413	if (direction == DMA_DEV_TO_MEM) {
    414		cfg.src_addr = res->start + MMCIF_CE_DATA;
    415		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    416	} else {
    417		cfg.dst_addr = res->start + MMCIF_CE_DATA;
    418		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    419	}
    420
    421	return dmaengine_slave_config(chan, &cfg);
    422}
    423
    424static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
    425{
    426	struct device *dev = sh_mmcif_host_to_dev(host);
    427	host->dma_active = false;
    428
    429	/* We can only either use DMA for both Tx and Rx or not use it at all */
    430	if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) {
    431		struct sh_mmcif_plat_data *pdata = dev->platform_data;
    432
    433		host->chan_tx = sh_mmcif_request_dma_pdata(host,
    434							pdata->slave_id_tx);
    435		host->chan_rx = sh_mmcif_request_dma_pdata(host,
    436							pdata->slave_id_rx);
    437	} else {
    438		host->chan_tx = dma_request_chan(dev, "tx");
    439		if (IS_ERR(host->chan_tx))
    440			host->chan_tx = NULL;
    441		host->chan_rx = dma_request_chan(dev, "rx");
    442		if (IS_ERR(host->chan_rx))
    443			host->chan_rx = NULL;
    444	}
    445	dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
    446		host->chan_rx);
    447
    448	if (!host->chan_tx || !host->chan_rx ||
    449	    sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) ||
    450	    sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM))
    451		goto error;
    452
    453	return;
    454
    455error:
    456	if (host->chan_tx)
    457		dma_release_channel(host->chan_tx);
    458	if (host->chan_rx)
    459		dma_release_channel(host->chan_rx);
    460	host->chan_tx = host->chan_rx = NULL;
    461}
    462
    463static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
    464{
    465	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
    466	/* Descriptors are freed automatically */
    467	if (host->chan_tx) {
    468		struct dma_chan *chan = host->chan_tx;
    469		host->chan_tx = NULL;
    470		dma_release_channel(chan);
    471	}
    472	if (host->chan_rx) {
    473		struct dma_chan *chan = host->chan_rx;
    474		host->chan_rx = NULL;
    475		dma_release_channel(chan);
    476	}
    477
    478	host->dma_active = false;
    479}
    480
    481static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
    482{
    483	struct device *dev = sh_mmcif_host_to_dev(host);
    484	struct sh_mmcif_plat_data *p = dev->platform_data;
    485	bool sup_pclk = p ? p->sup_pclk : false;
    486	unsigned int current_clk = clk_get_rate(host->clk);
    487	unsigned int clkdiv;
    488
    489	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
    490	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);
    491
    492	if (!clk)
    493		return;
    494
    495	if (host->clkdiv_map) {
    496		unsigned int freq, best_freq, myclk, div, diff_min, diff;
    497		int i;
    498
    499		clkdiv = 0;
    500		diff_min = ~0;
    501		best_freq = 0;
    502		for (i = 31; i >= 0; i--) {
    503			if (!((1 << i) & host->clkdiv_map))
    504				continue;
    505
    506			/*
    507			 * clk = parent_freq / div
    508			 * -> parent_freq = clk x div
    509			 */
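        			/*
        			 * Worked example (assuming bit 2 of clkdiv_map turns out
        			 * to be the best match below): div = 1 << (2 + 1) = 8, so
        			 * a 400 kHz target asks clk_round_rate() for ~3.2 MHz and
        			 * the CLKDIV field programmed below ends up as 2.
        			 */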
    510
    511			div = 1 << (i + 1);
    512			freq = clk_round_rate(host->clk, clk * div);
    513			myclk = freq / div;
    514			diff = (myclk > clk) ? myclk - clk : clk - myclk;
    515
    516			if (diff <= diff_min) {
    517				best_freq = freq;
    518				clkdiv = i;
    519				diff_min = diff;
    520			}
    521		}
    522
    523		dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n",
    524			(best_freq >> (clkdiv + 1)), clk, best_freq, clkdiv);
    525
    526		clk_set_rate(host->clk, best_freq);
    527		clkdiv = clkdiv << 16;
    528	} else if (sup_pclk && clk == current_clk) {
    529		clkdiv = CLK_SUP_PCLK;
    530	} else {
    531		clkdiv = (fls(DIV_ROUND_UP(current_clk, clk) - 1) - 1) << 16;
    532	}
    533
    534	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv);
    535	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
    536}
    537
    538static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
    539{
    540	u32 tmp;
    541
    542	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);
    543
    544	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
    545	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
    546	if (host->ccs_enable)
    547		tmp |= SCCSTO_29;
    548	if (host->clk_ctrl2_enable)
    549		sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
    550	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
    551		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
    552	/* byte swap on */
    553	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
    554}
    555
    556static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
    557{
    558	struct device *dev = sh_mmcif_host_to_dev(host);
    559	u32 state1, state2;
    560	int ret, timeout;
    561
    562	host->sd_error = false;
    563
    564	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
    565	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
    566	dev_dbg(dev, "ERR HOST_STS1 = %08x\n", state1);
    567	dev_dbg(dev, "ERR HOST_STS2 = %08x\n", state2);
    568
    569	if (state1 & STS1_CMDSEQ) {
    570		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
    571		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
    572		for (timeout = 10000; timeout; timeout--) {
    573			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
    574			      & STS1_CMDSEQ))
    575				break;
    576			mdelay(1);
    577		}
    578		if (!timeout) {
    579			dev_err(dev,
    580				"Forced end of command sequence timeout err\n");
    581			return -EIO;
    582		}
    583		sh_mmcif_sync_reset(host);
    584		dev_dbg(dev, "Forced end of command sequence\n");
    585		return -EIO;
    586	}
    587
    588	if (state2 & STS2_CRC_ERR) {
    589		dev_err(dev, " CRC error: state %u, wait %u\n",
    590			host->state, host->wait_for);
    591		ret = -EIO;
    592	} else if (state2 & STS2_TIMEOUT_ERR) {
    593		dev_err(dev, " Timeout: state %u, wait %u\n",
    594			host->state, host->wait_for);
    595		ret = -ETIMEDOUT;
    596	} else {
    597		dev_dbg(dev, " End/Index error: state %u, wait %u\n",
    598			host->state, host->wait_for);
    599		ret = -EIO;
    600	}
    601	return ret;
    602}
    603
    604static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
    605{
    606	struct mmc_data *data = host->mrq->data;
    607
    608	host->sg_blkidx += host->blocksize;
    609
    610	/* data->sg->length must be a multiple of host->blocksize? */
    611	BUG_ON(host->sg_blkidx > data->sg->length);
    612
    613	if (host->sg_blkidx == data->sg->length) {
    614		host->sg_blkidx = 0;
    615		if (++host->sg_idx < data->sg_len)
    616			host->pio_ptr = sg_virt(++data->sg);
    617	} else {
    618		host->pio_ptr = p;
    619	}
    620
    621	return host->sg_idx != data->sg_len;
    622}
    623
    624static void sh_mmcif_single_read(struct sh_mmcif_host *host,
    625				 struct mmc_request *mrq)
    626{
    627	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
    628			   BLOCK_SIZE_MASK) + 3;
    629
    630	host->wait_for = MMCIF_WAIT_FOR_READ;
    631
    632	/* buf read enable */
    633	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
    634}
    635
    636static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
    637{
    638	struct device *dev = sh_mmcif_host_to_dev(host);
    639	struct mmc_data *data = host->mrq->data;
    640	u32 *p = sg_virt(data->sg);
    641	int i;
    642
    643	if (host->sd_error) {
    644		data->error = sh_mmcif_error_manage(host);
    645		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
    646		return false;
    647	}
    648
    649	for (i = 0; i < host->blocksize / 4; i++)
    650		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
    651
    652	/* buffer read end */
    653	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
    654	host->wait_for = MMCIF_WAIT_FOR_READ_END;
    655
    656	return true;
    657}
    658
    659static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
    660				struct mmc_request *mrq)
    661{
    662	struct mmc_data *data = mrq->data;
    663
    664	if (!data->sg_len || !data->sg->length)
    665		return;
    666
    667	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
    668		BLOCK_SIZE_MASK;
    669
    670	host->wait_for = MMCIF_WAIT_FOR_MREAD;
    671	host->sg_idx = 0;
    672	host->sg_blkidx = 0;
    673	host->pio_ptr = sg_virt(data->sg);
    674
    675	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
    676}
    677
    678static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
    679{
    680	struct device *dev = sh_mmcif_host_to_dev(host);
    681	struct mmc_data *data = host->mrq->data;
    682	u32 *p = host->pio_ptr;
    683	int i;
    684
    685	if (host->sd_error) {
    686		data->error = sh_mmcif_error_manage(host);
    687		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
    688		return false;
    689	}
    690
    691	BUG_ON(!data->sg->length);
    692
    693	for (i = 0; i < host->blocksize / 4; i++)
    694		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
    695
    696	if (!sh_mmcif_next_block(host, p))
    697		return false;
    698
    699	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
    700
    701	return true;
    702}
    703
    704static void sh_mmcif_single_write(struct sh_mmcif_host *host,
    705					struct mmc_request *mrq)
    706{
    707	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
    708			   BLOCK_SIZE_MASK) + 3;
    709
    710	host->wait_for = MMCIF_WAIT_FOR_WRITE;
    711
    712	/* buf write enable */
    713	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
    714}
    715
    716static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
    717{
    718	struct device *dev = sh_mmcif_host_to_dev(host);
    719	struct mmc_data *data = host->mrq->data;
    720	u32 *p = sg_virt(data->sg);
    721	int i;
    722
    723	if (host->sd_error) {
    724		data->error = sh_mmcif_error_manage(host);
    725		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
    726		return false;
    727	}
    728
    729	for (i = 0; i < host->blocksize / 4; i++)
    730		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
    731
    732	/* buffer write end */
    733	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
    734	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;
    735
    736	return true;
    737}
    738
    739static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
    740				struct mmc_request *mrq)
    741{
    742	struct mmc_data *data = mrq->data;
    743
    744	if (!data->sg_len || !data->sg->length)
    745		return;
    746
    747	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
    748		BLOCK_SIZE_MASK;
    749
    750	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
    751	host->sg_idx = 0;
    752	host->sg_blkidx = 0;
    753	host->pio_ptr = sg_virt(data->sg);
    754
    755	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
    756}
    757
    758static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
    759{
    760	struct device *dev = sh_mmcif_host_to_dev(host);
    761	struct mmc_data *data = host->mrq->data;
    762	u32 *p = host->pio_ptr;
    763	int i;
    764
    765	if (host->sd_error) {
    766		data->error = sh_mmcif_error_manage(host);
    767		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
    768		return false;
    769	}
    770
    771	BUG_ON(!data->sg->length);
    772
    773	for (i = 0; i < host->blocksize / 4; i++)
    774		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
    775
    776	if (!sh_mmcif_next_block(host, p))
    777		return false;
    778
    779	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
    780
    781	return true;
    782}
    783
    784static void sh_mmcif_get_response(struct sh_mmcif_host *host,
    785						struct mmc_command *cmd)
    786{
    787	if (cmd->flags & MMC_RSP_136) {
    788		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
    789		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
    790		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
    791		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
    792	} else
    793		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
    794}
    795
    796static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
    797						struct mmc_command *cmd)
    798{
    799	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
    800}
    801
    802static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
    803			    struct mmc_request *mrq)
    804{
    805	struct device *dev = sh_mmcif_host_to_dev(host);
    806	struct mmc_data *data = mrq->data;
    807	struct mmc_command *cmd = mrq->cmd;
    808	u32 opc = cmd->opcode;
    809	u32 tmp = 0;
    810
    811	/* Response Type check */
    812	switch (mmc_resp_type(cmd)) {
    813	case MMC_RSP_NONE:
    814		tmp |= CMD_SET_RTYP_NO;
    815		break;
    816	case MMC_RSP_R1:
    817	case MMC_RSP_R3:
    818		tmp |= CMD_SET_RTYP_6B;
    819		break;
    820	case MMC_RSP_R1B:
    821		tmp |= CMD_SET_RBSY | CMD_SET_RTYP_6B;
    822		break;
    823	case MMC_RSP_R2:
    824		tmp |= CMD_SET_RTYP_17B;
    825		break;
    826	default:
    827		dev_err(dev, "Unsupported response type.\n");
    828		break;
    829	}
    830
    831	/* WDAT / DATW */
    832	if (data) {
    833		tmp |= CMD_SET_WDAT;
    834		switch (host->bus_width) {
    835		case MMC_BUS_WIDTH_1:
    836			tmp |= CMD_SET_DATW_1;
    837			break;
    838		case MMC_BUS_WIDTH_4:
    839			tmp |= CMD_SET_DATW_4;
    840			break;
    841		case MMC_BUS_WIDTH_8:
    842			tmp |= CMD_SET_DATW_8;
    843			break;
    844		default:
    845			dev_err(dev, "Unsupported bus width.\n");
    846			break;
    847		}
    848		switch (host->timing) {
    849		case MMC_TIMING_MMC_DDR52:
    850			/*
    851			 * MMC core will only set this timing, if the host
    852			 * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR
    853			 * capability. MMCIF implementations with this
    854			 * capability, e.g. sh73a0, will have to set it
    855			 * in their platform data.
    856			 */
    857			tmp |= CMD_SET_DARS;
    858			break;
    859		}
    860	}
    861	/* DWEN */
    862	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
    863		tmp |= CMD_SET_DWEN;
    864	/* CMLTE/CMD12EN */
    865	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
    866		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
    867		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
    868				data->blocks << 16);
    869	}
    870	/* RIDXC[1:0] check bits */
    871	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
    872	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
    873		tmp |= CMD_SET_RIDXC_BITS;
    874	/* RCRC7C[1:0] check bits */
    875	if (opc == MMC_SEND_OP_COND)
    876		tmp |= CMD_SET_CRC7C_BITS;
    877	/* RCRC7C[1:0] internal CRC7 */
    878	if (opc == MMC_ALL_SEND_CID ||
    879		opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
    880		tmp |= CMD_SET_CRC7C_INTERNAL;
    881
    882	return (opc << 24) | tmp;
    883}
    884
    885static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
    886			       struct mmc_request *mrq, u32 opc)
    887{
    888	struct device *dev = sh_mmcif_host_to_dev(host);
    889
    890	switch (opc) {
    891	case MMC_READ_MULTIPLE_BLOCK:
    892		sh_mmcif_multi_read(host, mrq);
    893		return 0;
    894	case MMC_WRITE_MULTIPLE_BLOCK:
    895		sh_mmcif_multi_write(host, mrq);
    896		return 0;
    897	case MMC_WRITE_BLOCK:
    898		sh_mmcif_single_write(host, mrq);
    899		return 0;
    900	case MMC_READ_SINGLE_BLOCK:
    901	case MMC_SEND_EXT_CSD:
    902		sh_mmcif_single_read(host, mrq);
    903		return 0;
    904	default:
    905		dev_err(dev, "Unsupported CMD%d\n", opc);
    906		return -EINVAL;
    907	}
    908}
    909
    910static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
    911			       struct mmc_request *mrq)
    912{
    913	struct mmc_command *cmd = mrq->cmd;
    914	u32 opc;
    915	u32 mask = 0;
    916	unsigned long flags;
    917
    918	if (cmd->flags & MMC_RSP_BUSY)
    919		mask = MASK_START_CMD | MASK_MRBSYE;
    920	else
    921		mask = MASK_START_CMD | MASK_MCRSPE;
    922
    923	if (host->ccs_enable)
    924		mask |= MASK_MCCSTO;
    925
    926	if (mrq->data) {
    927		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
    928		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
    929				mrq->data->blksz);
    930	}
    931	opc = sh_mmcif_set_cmd(host, mrq);
    932
    933	if (host->ccs_enable)
    934		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
    935	else
    936		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
    937	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
    938	/* set arg */
    939	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
    940	/* set cmd */
    941	spin_lock_irqsave(&host->lock, flags);
    942	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
    943
    944	host->wait_for = MMCIF_WAIT_FOR_CMD;
    945	schedule_delayed_work(&host->timeout_work, host->timeout);
    946	spin_unlock_irqrestore(&host->lock, flags);
    947}
    948
    949static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
    950			      struct mmc_request *mrq)
    951{
    952	struct device *dev = sh_mmcif_host_to_dev(host);
    953
    954	switch (mrq->cmd->opcode) {
    955	case MMC_READ_MULTIPLE_BLOCK:
    956		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
    957		break;
    958	case MMC_WRITE_MULTIPLE_BLOCK:
    959		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
    960		break;
    961	default:
    962		dev_err(dev, "unsupported stop cmd\n");
    963		mrq->stop->error = sh_mmcif_error_manage(host);
    964		return;
    965	}
    966
    967	host->wait_for = MMCIF_WAIT_FOR_STOP;
    968}
    969
    970static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
    971{
    972	struct sh_mmcif_host *host = mmc_priv(mmc);
    973	struct device *dev = sh_mmcif_host_to_dev(host);
    974	unsigned long flags;
    975
    976	spin_lock_irqsave(&host->lock, flags);
    977	if (host->state != STATE_IDLE) {
    978		dev_dbg(dev, "%s() rejected, state %u\n",
    979			__func__, host->state);
    980		spin_unlock_irqrestore(&host->lock, flags);
    981		mrq->cmd->error = -EAGAIN;
    982		mmc_request_done(mmc, mrq);
    983		return;
    984	}
    985
    986	host->state = STATE_REQUEST;
    987	spin_unlock_irqrestore(&host->lock, flags);
    988
    989	host->mrq = mrq;
    990
    991	sh_mmcif_start_cmd(host, mrq);
    992}
    993
    994static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
    995{
    996	struct device *dev = sh_mmcif_host_to_dev(host);
    997
    998	if (host->mmc->f_max) {
    999		unsigned int f_max, f_min = 0, f_min_old;
   1000
   1001		f_max = host->mmc->f_max;
   1002		for (f_min_old = f_max; f_min_old > 2;) {
   1003			f_min = clk_round_rate(host->clk, f_min_old / 2);
   1004			if (f_min == f_min_old)
   1005				break;
   1006			f_min_old = f_min;
   1007		}
   1008
   1009		/*
   1010		 * This driver assumes this SoC is R-Car Gen2 or later
   1011		 */
   1012		host->clkdiv_map = 0x3ff;
   1013
   1014		host->mmc->f_max = f_max >> ffs(host->clkdiv_map);
   1015		host->mmc->f_min = f_min >> fls(host->clkdiv_map);
   1016	} else {
   1017		unsigned int clk = clk_get_rate(host->clk);
   1018
   1019		host->mmc->f_max = clk / 2;
   1020		host->mmc->f_min = clk / 512;
   1021	}
   1022
   1023	dev_dbg(dev, "clk max/min = %d/%d\n",
   1024		host->mmc->f_max, host->mmc->f_min);
   1025}
   1026
   1027static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
   1028{
   1029	struct sh_mmcif_host *host = mmc_priv(mmc);
   1030	struct device *dev = sh_mmcif_host_to_dev(host);
   1031	unsigned long flags;
   1032
   1033	spin_lock_irqsave(&host->lock, flags);
   1034	if (host->state != STATE_IDLE) {
   1035		dev_dbg(dev, "%s() rejected, state %u\n",
   1036			__func__, host->state);
   1037		spin_unlock_irqrestore(&host->lock, flags);
   1038		return;
   1039	}
   1040
   1041	host->state = STATE_IOS;
   1042	spin_unlock_irqrestore(&host->lock, flags);
   1043
   1044	switch (ios->power_mode) {
   1045	case MMC_POWER_UP:
   1046		if (!IS_ERR(mmc->supply.vmmc))
   1047			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
   1048		if (!host->power) {
   1049			clk_prepare_enable(host->clk);
   1050			pm_runtime_get_sync(dev);
   1051			sh_mmcif_sync_reset(host);
   1052			sh_mmcif_request_dma(host);
   1053			host->power = true;
   1054		}
   1055		break;
   1056	case MMC_POWER_OFF:
   1057		if (!IS_ERR(mmc->supply.vmmc))
   1058			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
   1059		if (host->power) {
   1060			sh_mmcif_clock_control(host, 0);
   1061			sh_mmcif_release_dma(host);
   1062			pm_runtime_put(dev);
   1063			clk_disable_unprepare(host->clk);
   1064			host->power = false;
   1065		}
   1066		break;
   1067	case MMC_POWER_ON:
   1068		sh_mmcif_clock_control(host, ios->clock);
   1069		break;
   1070	}
   1071
   1072	host->timing = ios->timing;
   1073	host->bus_width = ios->bus_width;
   1074	host->state = STATE_IDLE;
   1075}
   1076
   1077static const struct mmc_host_ops sh_mmcif_ops = {
   1078	.request	= sh_mmcif_request,
   1079	.set_ios	= sh_mmcif_set_ios,
   1080	.get_cd		= mmc_gpio_get_cd,
   1081};
   1082
   1083static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
   1084{
   1085	struct mmc_command *cmd = host->mrq->cmd;
   1086	struct mmc_data *data = host->mrq->data;
   1087	struct device *dev = sh_mmcif_host_to_dev(host);
   1088	long time;
   1089
   1090	if (host->sd_error) {
   1091		switch (cmd->opcode) {
   1092		case MMC_ALL_SEND_CID:
   1093		case MMC_SELECT_CARD:
   1094		case MMC_APP_CMD:
   1095			cmd->error = -ETIMEDOUT;
   1096			break;
   1097		default:
   1098			cmd->error = sh_mmcif_error_manage(host);
   1099			break;
   1100		}
   1101		dev_dbg(dev, "CMD%d error %d\n",
   1102			cmd->opcode, cmd->error);
   1103		host->sd_error = false;
   1104		return false;
   1105	}
   1106	if (!(cmd->flags & MMC_RSP_PRESENT)) {
   1107		cmd->error = 0;
   1108		return false;
   1109	}
   1110
   1111	sh_mmcif_get_response(host, cmd);
   1112
   1113	if (!data)
   1114		return false;
   1115
   1116	/*
    1117	 * Completion can be signalled from the DMA callback and from the error
    1118	 * path, so it has to be reinitialised here, before .dma_active is set.
   1119	 */
   1120	init_completion(&host->dma_complete);
   1121
   1122	if (data->flags & MMC_DATA_READ) {
   1123		if (host->chan_rx)
   1124			sh_mmcif_start_dma_rx(host);
   1125	} else {
   1126		if (host->chan_tx)
   1127			sh_mmcif_start_dma_tx(host);
   1128	}
   1129
   1130	if (!host->dma_active) {
   1131		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
   1132		return !data->error;
   1133	}
   1134
   1135	/* Running in the IRQ thread, can sleep */
   1136	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
   1137							 host->timeout);
   1138
   1139	if (data->flags & MMC_DATA_READ)
   1140		dma_unmap_sg(host->chan_rx->device->dev,
   1141			     data->sg, data->sg_len,
   1142			     DMA_FROM_DEVICE);
   1143	else
   1144		dma_unmap_sg(host->chan_tx->device->dev,
   1145			     data->sg, data->sg_len,
   1146			     DMA_TO_DEVICE);
   1147
   1148	if (host->sd_error) {
   1149		dev_err(host->mmc->parent,
   1150			"Error IRQ while waiting for DMA completion!\n");
   1151		/* Woken up by an error IRQ: abort DMA */
   1152		data->error = sh_mmcif_error_manage(host);
   1153	} else if (!time) {
   1154		dev_err(host->mmc->parent, "DMA timeout!\n");
   1155		data->error = -ETIMEDOUT;
   1156	} else if (time < 0) {
   1157		dev_err(host->mmc->parent,
   1158			"wait_for_completion_...() error %ld!\n", time);
   1159		data->error = time;
   1160	}
   1161	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
   1162			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
   1163	host->dma_active = false;
   1164
   1165	if (data->error) {
   1166		data->bytes_xfered = 0;
   1167		/* Abort DMA */
   1168		if (data->flags & MMC_DATA_READ)
   1169			dmaengine_terminate_sync(host->chan_rx);
   1170		else
   1171			dmaengine_terminate_sync(host->chan_tx);
   1172	}
   1173
   1174	return false;
   1175}
   1176
   1177static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
   1178{
   1179	struct sh_mmcif_host *host = dev_id;
   1180	struct mmc_request *mrq;
   1181	struct device *dev = sh_mmcif_host_to_dev(host);
   1182	bool wait = false;
   1183	unsigned long flags;
   1184	int wait_work;
   1185
   1186	spin_lock_irqsave(&host->lock, flags);
   1187	wait_work = host->wait_for;
   1188	spin_unlock_irqrestore(&host->lock, flags);
   1189
   1190	cancel_delayed_work_sync(&host->timeout_work);
   1191
   1192	mutex_lock(&host->thread_lock);
   1193
   1194	mrq = host->mrq;
   1195	if (!mrq) {
   1196		dev_dbg(dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
   1197			host->state, host->wait_for);
   1198		mutex_unlock(&host->thread_lock);
   1199		return IRQ_HANDLED;
   1200	}
   1201
   1202	/*
   1203	 * All handlers return true, if processing continues, and false, if the
   1204	 * request has to be completed - successfully or not
   1205	 */
   1206	switch (wait_work) {
   1207	case MMCIF_WAIT_FOR_REQUEST:
   1208		/* We're too late, the timeout has already kicked in */
   1209		mutex_unlock(&host->thread_lock);
   1210		return IRQ_HANDLED;
   1211	case MMCIF_WAIT_FOR_CMD:
   1212		/* Wait for data? */
   1213		wait = sh_mmcif_end_cmd(host);
   1214		break;
   1215	case MMCIF_WAIT_FOR_MREAD:
   1216		/* Wait for more data? */
   1217		wait = sh_mmcif_mread_block(host);
   1218		break;
   1219	case MMCIF_WAIT_FOR_READ:
   1220		/* Wait for data end? */
   1221		wait = sh_mmcif_read_block(host);
   1222		break;
   1223	case MMCIF_WAIT_FOR_MWRITE:
    1224		/* Wait for more data to write? */
   1225		wait = sh_mmcif_mwrite_block(host);
   1226		break;
   1227	case MMCIF_WAIT_FOR_WRITE:
   1228		/* Wait for data end? */
   1229		wait = sh_mmcif_write_block(host);
   1230		break;
   1231	case MMCIF_WAIT_FOR_STOP:
   1232		if (host->sd_error) {
   1233			mrq->stop->error = sh_mmcif_error_manage(host);
   1234			dev_dbg(dev, "%s(): %d\n", __func__, mrq->stop->error);
   1235			break;
   1236		}
   1237		sh_mmcif_get_cmd12response(host, mrq->stop);
   1238		mrq->stop->error = 0;
   1239		break;
   1240	case MMCIF_WAIT_FOR_READ_END:
   1241	case MMCIF_WAIT_FOR_WRITE_END:
   1242		if (host->sd_error) {
   1243			mrq->data->error = sh_mmcif_error_manage(host);
   1244			dev_dbg(dev, "%s(): %d\n", __func__, mrq->data->error);
   1245		}
   1246		break;
   1247	default:
   1248		BUG();
   1249	}
   1250
   1251	if (wait) {
   1252		schedule_delayed_work(&host->timeout_work, host->timeout);
   1253		/* Wait for more data */
   1254		mutex_unlock(&host->thread_lock);
   1255		return IRQ_HANDLED;
   1256	}
   1257
   1258	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
   1259		struct mmc_data *data = mrq->data;
   1260		if (!mrq->cmd->error && data && !data->error)
   1261			data->bytes_xfered =
   1262				data->blocks * data->blksz;
   1263
   1264		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
   1265			sh_mmcif_stop_cmd(host, mrq);
   1266			if (!mrq->stop->error) {
   1267				schedule_delayed_work(&host->timeout_work, host->timeout);
   1268				mutex_unlock(&host->thread_lock);
   1269				return IRQ_HANDLED;
   1270			}
   1271		}
   1272	}
   1273
   1274	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
   1275	host->state = STATE_IDLE;
   1276	host->mrq = NULL;
   1277	mmc_request_done(host->mmc, mrq);
   1278
   1279	mutex_unlock(&host->thread_lock);
   1280
   1281	return IRQ_HANDLED;
   1282}
   1283
   1284static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
   1285{
   1286	struct sh_mmcif_host *host = dev_id;
   1287	struct device *dev = sh_mmcif_host_to_dev(host);
   1288	u32 state, mask;
   1289
   1290	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
   1291	mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
   1292	if (host->ccs_enable)
   1293		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
   1294	else
   1295		sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
   1296	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);
   1297
   1298	if (state & ~MASK_CLEAN)
   1299		dev_dbg(dev, "IRQ state = 0x%08x incompletely cleared\n",
   1300			state);
   1301
   1302	if (state & INT_ERR_STS || state & ~INT_ALL) {
   1303		host->sd_error = true;
   1304		dev_dbg(dev, "int err state = 0x%08x\n", state);
   1305	}
   1306	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
   1307		if (!host->mrq)
   1308			dev_dbg(dev, "NULL IRQ state = 0x%08x\n", state);
   1309		if (!host->dma_active)
   1310			return IRQ_WAKE_THREAD;
   1311		else if (host->sd_error)
   1312			sh_mmcif_dma_complete(host);
   1313	} else {
   1314		dev_dbg(dev, "Unexpected IRQ 0x%x\n", state);
   1315	}
   1316
   1317	return IRQ_HANDLED;
   1318}
   1319
   1320static void sh_mmcif_timeout_work(struct work_struct *work)
   1321{
   1322	struct delayed_work *d = to_delayed_work(work);
   1323	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
   1324	struct mmc_request *mrq = host->mrq;
   1325	struct device *dev = sh_mmcif_host_to_dev(host);
   1326	unsigned long flags;
   1327
   1328	if (host->dying)
   1329		/* Don't run after mmc_remove_host() */
   1330		return;
   1331
   1332	spin_lock_irqsave(&host->lock, flags);
   1333	if (host->state == STATE_IDLE) {
   1334		spin_unlock_irqrestore(&host->lock, flags);
   1335		return;
   1336	}
   1337
   1338	dev_err(dev, "Timeout waiting for %u on CMD%u\n",
   1339		host->wait_for, mrq->cmd->opcode);
   1340
   1341	host->state = STATE_TIMEOUT;
   1342	spin_unlock_irqrestore(&host->lock, flags);
   1343
   1344	/*
   1345	 * Handle races with cancel_delayed_work(), unless
   1346	 * cancel_delayed_work_sync() is used
   1347	 */
   1348	switch (host->wait_for) {
   1349	case MMCIF_WAIT_FOR_CMD:
   1350		mrq->cmd->error = sh_mmcif_error_manage(host);
   1351		break;
   1352	case MMCIF_WAIT_FOR_STOP:
   1353		mrq->stop->error = sh_mmcif_error_manage(host);
   1354		break;
   1355	case MMCIF_WAIT_FOR_MREAD:
   1356	case MMCIF_WAIT_FOR_MWRITE:
   1357	case MMCIF_WAIT_FOR_READ:
   1358	case MMCIF_WAIT_FOR_WRITE:
   1359	case MMCIF_WAIT_FOR_READ_END:
   1360	case MMCIF_WAIT_FOR_WRITE_END:
   1361		mrq->data->error = sh_mmcif_error_manage(host);
   1362		break;
   1363	default:
   1364		BUG();
   1365	}
   1366
   1367	host->state = STATE_IDLE;
   1368	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
   1369	host->mrq = NULL;
   1370	mmc_request_done(host->mmc, mrq);
   1371}
   1372
   1373static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
   1374{
   1375	struct device *dev = sh_mmcif_host_to_dev(host);
   1376	struct sh_mmcif_plat_data *pd = dev->platform_data;
   1377	struct mmc_host *mmc = host->mmc;
   1378
   1379	mmc_regulator_get_supply(mmc);
   1380
   1381	if (!pd)
   1382		return;
   1383
   1384	if (!mmc->ocr_avail)
   1385		mmc->ocr_avail = pd->ocr;
   1386	else if (pd->ocr)
   1387		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
   1388}
   1389
   1390static int sh_mmcif_probe(struct platform_device *pdev)
   1391{
   1392	int ret = 0, irq[2];
   1393	struct mmc_host *mmc;
   1394	struct sh_mmcif_host *host;
   1395	struct device *dev = &pdev->dev;
   1396	struct sh_mmcif_plat_data *pd = dev->platform_data;
   1397	void __iomem *reg;
   1398	const char *name;
   1399
   1400	irq[0] = platform_get_irq(pdev, 0);
   1401	irq[1] = platform_get_irq_optional(pdev, 1);
   1402	if (irq[0] < 0)
   1403		return -ENXIO;
   1404
   1405	reg = devm_platform_ioremap_resource(pdev, 0);
   1406	if (IS_ERR(reg))
   1407		return PTR_ERR(reg);
   1408
   1409	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), dev);
   1410	if (!mmc)
   1411		return -ENOMEM;
   1412
   1413	ret = mmc_of_parse(mmc);
   1414	if (ret < 0)
   1415		goto err_host;
   1416
   1417	host		= mmc_priv(mmc);
   1418	host->mmc	= mmc;
   1419	host->addr	= reg;
   1420	host->timeout	= msecs_to_jiffies(10000);
   1421	host->ccs_enable = true;
   1422	host->clk_ctrl2_enable = false;
   1423
   1424	host->pd = pdev;
   1425
   1426	spin_lock_init(&host->lock);
   1427
   1428	mmc->ops = &sh_mmcif_ops;
   1429	sh_mmcif_init_ocr(host);
   1430
   1431	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
   1432	mmc->caps2 |= MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
   1433	mmc->max_busy_timeout = 10000;
   1434
   1435	if (pd && pd->caps)
   1436		mmc->caps |= pd->caps;
   1437	mmc->max_segs = 32;
   1438	mmc->max_blk_size = 512;
   1439	mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
   1440	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
   1441	mmc->max_seg_size = mmc->max_req_size;
   1442
   1443	platform_set_drvdata(pdev, host);
   1444
   1445	host->clk = devm_clk_get(dev, NULL);
   1446	if (IS_ERR(host->clk)) {
   1447		ret = PTR_ERR(host->clk);
   1448		dev_err(dev, "cannot get clock: %d\n", ret);
   1449		goto err_host;
   1450	}
   1451
   1452	ret = clk_prepare_enable(host->clk);
   1453	if (ret < 0)
   1454		goto err_host;
   1455
   1456	sh_mmcif_clk_setup(host);
   1457
   1458	pm_runtime_enable(dev);
   1459	host->power = false;
   1460
   1461	ret = pm_runtime_get_sync(dev);
   1462	if (ret < 0)
   1463		goto err_clk;
   1464
   1465	INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work);
   1466
   1467	sh_mmcif_sync_reset(host);
   1468	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
   1469
   1470	name = irq[1] < 0 ? dev_name(dev) : "sh_mmc:error";
   1471	ret = devm_request_threaded_irq(dev, irq[0], sh_mmcif_intr,
   1472					sh_mmcif_irqt, 0, name, host);
   1473	if (ret) {
   1474		dev_err(dev, "request_irq error (%s)\n", name);
   1475		goto err_clk;
   1476	}
   1477	if (irq[1] >= 0) {
   1478		ret = devm_request_threaded_irq(dev, irq[1],
   1479						sh_mmcif_intr, sh_mmcif_irqt,
   1480						0, "sh_mmc:int", host);
   1481		if (ret) {
   1482			dev_err(dev, "request_irq error (sh_mmc:int)\n");
   1483			goto err_clk;
   1484		}
   1485	}
   1486
   1487	mutex_init(&host->thread_lock);
   1488
   1489	ret = mmc_add_host(mmc);
   1490	if (ret < 0)
   1491		goto err_clk;
   1492
   1493	dev_pm_qos_expose_latency_limit(dev, 100);
   1494
   1495	dev_info(dev, "Chip version 0x%04x, clock rate %luMHz\n",
   1496		 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
   1497		 clk_get_rate(host->clk) / 1000000UL);
   1498
   1499	pm_runtime_put(dev);
   1500	clk_disable_unprepare(host->clk);
   1501	return ret;
   1502
   1503err_clk:
   1504	clk_disable_unprepare(host->clk);
   1505	pm_runtime_put_sync(dev);
   1506	pm_runtime_disable(dev);
   1507err_host:
   1508	mmc_free_host(mmc);
   1509	return ret;
   1510}
   1511
   1512static int sh_mmcif_remove(struct platform_device *pdev)
   1513{
   1514	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
   1515
   1516	host->dying = true;
   1517	clk_prepare_enable(host->clk);
   1518	pm_runtime_get_sync(&pdev->dev);
   1519
   1520	dev_pm_qos_hide_latency_limit(&pdev->dev);
   1521
   1522	mmc_remove_host(host->mmc);
   1523	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
   1524
   1525	/*
   1526	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
   1527	 * mmc_remove_host() call above. But swapping order doesn't help either
   1528	 * (a query on the linux-mmc mailing list didn't bring any replies).
   1529	 */
   1530	cancel_delayed_work_sync(&host->timeout_work);
   1531
   1532	clk_disable_unprepare(host->clk);
   1533	mmc_free_host(host->mmc);
   1534	pm_runtime_put_sync(&pdev->dev);
   1535	pm_runtime_disable(&pdev->dev);
   1536
   1537	return 0;
   1538}
   1539
   1540#ifdef CONFIG_PM_SLEEP
   1541static int sh_mmcif_suspend(struct device *dev)
   1542{
   1543	struct sh_mmcif_host *host = dev_get_drvdata(dev);
   1544
   1545	pm_runtime_get_sync(dev);
   1546	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
   1547	pm_runtime_put(dev);
   1548
   1549	return 0;
   1550}
   1551
   1552static int sh_mmcif_resume(struct device *dev)
   1553{
   1554	return 0;
   1555}
   1556#endif
   1557
   1558static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
   1559	SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
   1560};
   1561
   1562static struct platform_driver sh_mmcif_driver = {
   1563	.probe		= sh_mmcif_probe,
   1564	.remove		= sh_mmcif_remove,
   1565	.driver		= {
   1566		.name	= DRIVER_NAME,
   1567		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
   1568		.pm	= &sh_mmcif_dev_pm_ops,
   1569		.of_match_table = sh_mmcif_of_match,
   1570	},
   1571};
   1572
   1573module_platform_driver(sh_mmcif_driver);
   1574
   1575MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
   1576MODULE_LICENSE("GPL v2");
   1577MODULE_ALIAS("platform:" DRIVER_NAME);
   1578MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");