cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

usdhi6rol0.c (49780B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
 * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/virtio.h>
#include <linux/workqueue.h>

#define USDHI6_SD_CMD		0x0000
#define USDHI6_SD_PORT_SEL	0x0004
#define USDHI6_SD_ARG		0x0008
#define USDHI6_SD_STOP		0x0010
#define USDHI6_SD_SECCNT	0x0014
#define USDHI6_SD_RSP10		0x0018
#define USDHI6_SD_RSP32		0x0020
#define USDHI6_SD_RSP54		0x0028
#define USDHI6_SD_RSP76		0x0030
#define USDHI6_SD_INFO1		0x0038
#define USDHI6_SD_INFO2		0x003c
#define USDHI6_SD_INFO1_MASK	0x0040
#define USDHI6_SD_INFO2_MASK	0x0044
#define USDHI6_SD_CLK_CTRL	0x0048
#define USDHI6_SD_SIZE		0x004c
#define USDHI6_SD_OPTION	0x0050
#define USDHI6_SD_ERR_STS1	0x0058
#define USDHI6_SD_ERR_STS2	0x005c
#define USDHI6_SD_BUF0		0x0060
#define USDHI6_SDIO_MODE	0x0068
#define USDHI6_SDIO_INFO1	0x006c
#define USDHI6_SDIO_INFO1_MASK	0x0070
#define USDHI6_CC_EXT_MODE	0x01b0
#define USDHI6_SOFT_RST		0x01c0
#define USDHI6_VERSION		0x01c4
#define USDHI6_HOST_MODE	0x01c8
#define USDHI6_SDIF_MODE	0x01cc

#define USDHI6_SD_CMD_APP		0x0040
#define USDHI6_SD_CMD_MODE_RSP_AUTO	0x0000
#define USDHI6_SD_CMD_MODE_RSP_NONE	0x0300
#define USDHI6_SD_CMD_MODE_RSP_R1	0x0400	/* Also R5, R6, R7 */
#define USDHI6_SD_CMD_MODE_RSP_R1B	0x0500	/* R1b */
#define USDHI6_SD_CMD_MODE_RSP_R2	0x0600
#define USDHI6_SD_CMD_MODE_RSP_R3	0x0700	/* Also R4 */
#define USDHI6_SD_CMD_DATA		0x0800
#define USDHI6_SD_CMD_READ		0x1000
#define USDHI6_SD_CMD_MULTI		0x2000
#define USDHI6_SD_CMD_CMD12_AUTO_OFF	0x4000

#define USDHI6_CC_EXT_MODE_SDRW		BIT(1)

#define USDHI6_SD_INFO1_RSP_END		BIT(0)
#define USDHI6_SD_INFO1_ACCESS_END	BIT(2)
#define USDHI6_SD_INFO1_CARD_OUT	BIT(3)
#define USDHI6_SD_INFO1_CARD_IN		BIT(4)
#define USDHI6_SD_INFO1_CD		BIT(5)
#define USDHI6_SD_INFO1_WP		BIT(7)
#define USDHI6_SD_INFO1_D3_CARD_OUT	BIT(8)
#define USDHI6_SD_INFO1_D3_CARD_IN	BIT(9)

#define USDHI6_SD_INFO2_CMD_ERR		BIT(0)
#define USDHI6_SD_INFO2_CRC_ERR		BIT(1)
#define USDHI6_SD_INFO2_END_ERR		BIT(2)
#define USDHI6_SD_INFO2_TOUT		BIT(3)
#define USDHI6_SD_INFO2_IWA_ERR		BIT(4)
#define USDHI6_SD_INFO2_IRA_ERR		BIT(5)
#define USDHI6_SD_INFO2_RSP_TOUT	BIT(6)
#define USDHI6_SD_INFO2_SDDAT0		BIT(7)
#define USDHI6_SD_INFO2_BRE		BIT(8)
#define USDHI6_SD_INFO2_BWE		BIT(9)
#define USDHI6_SD_INFO2_SCLKDIVEN	BIT(13)
#define USDHI6_SD_INFO2_CBSY		BIT(14)
#define USDHI6_SD_INFO2_ILA		BIT(15)

#define USDHI6_SD_INFO1_CARD_INSERT (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_D3_CARD_IN)
#define USDHI6_SD_INFO1_CARD_EJECT (USDHI6_SD_INFO1_CARD_OUT | USDHI6_SD_INFO1_D3_CARD_OUT)
#define USDHI6_SD_INFO1_CARD (USDHI6_SD_INFO1_CARD_INSERT | USDHI6_SD_INFO1_CARD_EJECT)
#define USDHI6_SD_INFO1_CARD_CD (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_CARD_OUT)

#define USDHI6_SD_INFO2_ERR	(USDHI6_SD_INFO2_CMD_ERR |	\
	USDHI6_SD_INFO2_CRC_ERR | USDHI6_SD_INFO2_END_ERR |	\
	USDHI6_SD_INFO2_TOUT | USDHI6_SD_INFO2_IWA_ERR |	\
	USDHI6_SD_INFO2_IRA_ERR | USDHI6_SD_INFO2_RSP_TOUT |	\
	USDHI6_SD_INFO2_ILA)

#define USDHI6_SD_INFO1_IRQ	(USDHI6_SD_INFO1_RSP_END | USDHI6_SD_INFO1_ACCESS_END | \
				 USDHI6_SD_INFO1_CARD)

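/*
 * Note: the bare 0x0800 in the INFO2 IRQ mask below is BIT(11), kept as an
 * unnamed literal in this driver; presumably a status bit that has to be
 * part of the mask but for which no macro is defined.
 */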
#define USDHI6_SD_INFO2_IRQ	(USDHI6_SD_INFO2_ERR | USDHI6_SD_INFO2_BRE | \
				 USDHI6_SD_INFO2_BWE | 0x0800 | USDHI6_SD_INFO2_ILA)

#define USDHI6_SD_CLK_CTRL_SCLKEN	BIT(8)

#define USDHI6_SD_STOP_STP		BIT(0)
#define USDHI6_SD_STOP_SEC		BIT(8)

#define USDHI6_SDIO_INFO1_IOIRQ		BIT(0)
#define USDHI6_SDIO_INFO1_EXPUB52	BIT(14)
#define USDHI6_SDIO_INFO1_EXWT		BIT(15)

#define USDHI6_SD_ERR_STS1_CRC_NO_ERROR	BIT(13)

#define USDHI6_SOFT_RST_RESERVED	(BIT(1) | BIT(2))
#define USDHI6_SOFT_RST_RESET		BIT(0)

#define USDHI6_SD_OPTION_TIMEOUT_SHIFT	4
#define USDHI6_SD_OPTION_TIMEOUT_MASK	(0xf << USDHI6_SD_OPTION_TIMEOUT_SHIFT)
#define USDHI6_SD_OPTION_WIDTH_1	BIT(15)

#define USDHI6_SD_PORT_SEL_PORTS_SHIFT	8

#define USDHI6_SD_CLK_CTRL_DIV_MASK	0xff

#define USDHI6_SDIO_INFO1_IRQ	(USDHI6_SDIO_INFO1_IOIRQ | 3 | \
				 USDHI6_SDIO_INFO1_EXPUB52 | USDHI6_SDIO_INFO1_EXWT)

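/* Transfers of fewer bytes than this are always done in PIO, see usdhi6_rq_start() */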
#define USDHI6_MIN_DMA 64

#define USDHI6_REQ_TIMEOUT_MS 4000

enum usdhi6_wait_for {
	USDHI6_WAIT_FOR_REQUEST,
	USDHI6_WAIT_FOR_CMD,
	USDHI6_WAIT_FOR_MREAD,
	USDHI6_WAIT_FOR_MWRITE,
	USDHI6_WAIT_FOR_READ,
	USDHI6_WAIT_FOR_WRITE,
	USDHI6_WAIT_FOR_DATA_END,
	USDHI6_WAIT_FOR_STOP,
	USDHI6_WAIT_FOR_DMA,
};

struct usdhi6_page {
	struct page *page;
	void *mapped;		/* mapped page */
};

struct usdhi6_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	void __iomem *base;
	struct clk *clk;

	/* SG memory handling */

	/* Common for multiple and single block requests */
	struct usdhi6_page pg;	/* current page from an SG */
	void *blk_page;		/* either a mapped page, or the bounce buffer */
	size_t offset;		/* offset within a page, including sg->offset */

	/* Blocks crossing a page boundary */
	size_t head_len;
	struct usdhi6_page head_pg;

	/* A bounce buffer for unaligned blocks or blocks crossing a page boundary */
	struct scatterlist bounce_sg;
	u8 bounce_buf[512];

	/* Multiple block requests only */
	struct scatterlist *sg;	/* current SG segment */
	int page_idx;		/* page index within an SG segment */

	enum usdhi6_wait_for wait;
	u32 status_mask;
	u32 status2_mask;
	u32 sdio_mask;
	u32 io_error;
	u32 irq_status;
	unsigned long imclk;
	unsigned long rate;
	bool app_cmd;

	/* Timeout handling */
	struct delayed_work timeout_work;
	unsigned long timeout;

	/* DMA support */
	struct dma_chan *chan_rx;
	struct dma_chan *chan_tx;
	bool dma_active;

	/* Pin control */
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_uhs;
};

/*			I/O primitives					*/

static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data)
{
	iowrite32(data, host->base + reg);
	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
		host->base, reg, data);
}

static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data)
{
	iowrite16(data, host->base + reg);
	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
		host->base, reg, data);
}

static u32 usdhi6_read(struct usdhi6_host *host, u32 reg)
{
	u32 data = ioread32(host->base + reg);
	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
		host->base, reg, data);
	return data;
}

static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg)
{
	u16 data = ioread16(host->base + reg);
	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
		host->base, reg, data);
	return data;
}

static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2)
{
	host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1;
	host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2;
	usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask);
	usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask);
}

static void usdhi6_wait_for_resp(struct usdhi6_host *host)
{
	usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END |
			  USDHI6_SD_INFO1_ACCESS_END | USDHI6_SD_INFO1_CARD_CD,
			  USDHI6_SD_INFO2_ERR);
}

static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read)
{
	usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END |
			  USDHI6_SD_INFO1_CARD_CD, USDHI6_SD_INFO2_ERR |
			  (read ? USDHI6_SD_INFO2_BRE : USDHI6_SD_INFO2_BWE));
}

static void usdhi6_only_cd(struct usdhi6_host *host)
{
	/* Mask all except card hotplug */
	usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0);
}

static void usdhi6_mask_all(struct usdhi6_host *host)
{
	usdhi6_irq_enable(host, 0, 0);
}

static int usdhi6_error_code(struct usdhi6_host *host)
{
	u32 err;

	usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP);

	if (host->io_error &
	    (USDHI6_SD_INFO2_RSP_TOUT | USDHI6_SD_INFO2_TOUT)) {
		u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54);
		int opc = host->mrq ? host->mrq->cmd->opcode : -1;

		err = usdhi6_read(host, USDHI6_SD_ERR_STS2);
		/* Response timeout is often normal, don't spam the log */
		if (host->wait == USDHI6_WAIT_FOR_CMD)
			dev_dbg(mmc_dev(host->mmc),
				"T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
				err, rsp54, host->wait, opc);
		else
			dev_warn(mmc_dev(host->mmc),
				 "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
				 err, rsp54, host->wait, opc);
		return -ETIMEDOUT;
	}

	err = usdhi6_read(host, USDHI6_SD_ERR_STS1);
	if (err != USDHI6_SD_ERR_STS1_CRC_NO_ERROR)
		dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n",
			 err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1);
	if (host->io_error & USDHI6_SD_INFO2_ILA)
		return -EILSEQ;

	return -EIO;
}

/*			Scatter-Gather management			*/

/*
 * In PIO mode we have to map each page separately, using kmap(). That way
 * adjacent pages are mapped to non-adjacent virtual addresses. That's why we
 * have to use a bounce buffer for blocks crossing page boundaries. Such blocks
 * have been observed with an SDIO WiFi card (b43 driver).
 */
static void usdhi6_blk_bounce(struct usdhi6_host *host,
			      struct scatterlist *sg)
{
	struct mmc_data *data = host->mrq->data;
	size_t blk_head = host->head_len;

	dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n",
		__func__, host->mrq->cmd->opcode, data->sg_len,
		data->blksz, data->blocks, sg->offset);

	host->head_pg.page	= host->pg.page;
	host->head_pg.mapped	= host->pg.mapped;
	host->pg.page		= nth_page(host->pg.page, 1);
	host->pg.mapped		= kmap(host->pg.page);

	host->blk_page = host->bounce_buf;
	host->offset = 0;

	if (data->flags & MMC_DATA_READ)
		return;

	memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head,
	       blk_head);
	memcpy(host->bounce_buf + blk_head, host->pg.mapped,
	       data->blksz - blk_head);
}

/* Only called for multiple block IO */
static void usdhi6_sg_prep(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data = mrq->data;

	usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks);

	host->sg = data->sg;
	/* TODO: if we always map, this is redundant */
	host->offset = host->sg->offset;
}

/* Map the first page in an SG segment: common for multiple and single block IO */
static void *usdhi6_sg_map(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg;
	size_t head = PAGE_SIZE - sg->offset;
	size_t blk_head = head % data->blksz;

	WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page);
	if (WARN(sg_dma_len(sg) % data->blksz,
		 "SG size %u isn't a multiple of block size %u\n",
		 sg_dma_len(sg), data->blksz))
		return NULL;

	host->pg.page = sg_page(sg);
	host->pg.mapped = kmap(host->pg.page);
	host->offset = sg->offset;

	/*
	 * Block size must be a power of 2 for multi-block transfers,
	 * therefore blk_head is equal for all pages in this SG
	 */
	host->head_len = blk_head;

	if (head < data->blksz)
		/*
		 * The first block in the SG crosses a page boundary.
		 * Max blksz = 512, so blocks can only span 2 pages
		 */
		usdhi6_blk_bounce(host, sg);
	else
		host->blk_page = host->pg.mapped;

	dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n",
		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
		sg->offset, host->mrq->cmd->opcode, host->mrq);

	return host->blk_page + host->offset;
}

/* Unmap the current page: common for multiple and single block IO */
static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force)
{
	struct mmc_data *data = host->mrq->data;
	struct page *page = host->head_pg.page;

	if (page) {
		/* The previous block crossed a page boundary */
		struct scatterlist *sg = data->sg_len > 1 ?
			host->sg : data->sg;
		size_t blk_head = host->head_len;

		if (!data->error && data->flags & MMC_DATA_READ) {
			memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head,
			       host->bounce_buf, blk_head);
			memcpy(host->pg.mapped, host->bounce_buf + blk_head,
			       data->blksz - blk_head);
		}

		flush_dcache_page(page);
		kunmap(page);

		host->head_pg.page = NULL;

		if (!force && sg_dma_len(sg) + sg->offset >
		    (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head)
			/* More blocks in this SG, don't unmap the next page */
			return;
	}

	page = host->pg.page;
	if (!page)
		return;

	flush_dcache_page(page);
	kunmap(page);

	host->pg.page = NULL;
}

/* Called from MMC_WRITE_MULTIPLE_BLOCK or MMC_READ_MULTIPLE_BLOCK */
static void usdhi6_sg_advance(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;
	size_t done, total;

	/* New offset: set at the end of the previous block */
	if (host->head_pg.page) {
		/* Finished a cross-page block, jump to the new page */
		host->page_idx++;
		host->offset = data->blksz - host->head_len;
		host->blk_page = host->pg.mapped;
		usdhi6_sg_unmap(host, false);
	} else {
		host->offset += data->blksz;
		/* The completed block didn't cross a page boundary */
		if (host->offset == PAGE_SIZE) {
			/* If required, we'll map the page below */
			host->offset = 0;
			host->page_idx++;
		}
	}

	/*
	 * Now host->blk_page + host->offset point at the end of our last block
	 * and host->page_idx is the index of the page, in which our new block
	 * is located, if any
	 */

	done = (host->page_idx << PAGE_SHIFT) + host->offset;
	total = host->sg->offset + sg_dma_len(host->sg);

	dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__,
		done, total, host->offset);

	if (done < total && host->offset) {
		/* More blocks in this page */
		if (host->offset + data->blksz > PAGE_SIZE)
			/* We've reached a block that spans 2 pages */
			usdhi6_blk_bounce(host, host->sg);

		return;
	}

	/* Finished current page or an SG segment */
	usdhi6_sg_unmap(host, false);

	if (done == total) {
		/*
		 * End of an SG segment or the complete SG: jump to the next
		 * segment, we'll map it later in usdhi6_blk_read() or
		 * usdhi6_blk_write()
		 */
		struct scatterlist *next = sg_next(host->sg);

		host->page_idx = 0;

		if (!next)
			host->wait = USDHI6_WAIT_FOR_DATA_END;
		host->sg = next;

		if (WARN(next && sg_dma_len(next) % data->blksz,
			 "SG size %u isn't a multiple of block size %u\n",
			 sg_dma_len(next), data->blksz))
			data->error = -EINVAL;

		return;
	}

	/* We cannot get here after crossing a page border */

	/* Next page in the same SG */
	host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
	host->pg.mapped = kmap(host->pg.page);
	host->blk_page = host->pg.mapped;

	dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n",
		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
		host->mrq->cmd->opcode, host->mrq);
}

/*			DMA handling					*/

static void usdhi6_dma_release(struct usdhi6_host *host)
{
	host->dma_active = false;
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
}

static void usdhi6_dma_stop_unmap(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;

	if (!host->dma_active)
		return;

	usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
	host->dma_active = false;

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev, data->sg,
			     data->sg_len, DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev, data->sg,
			     data->sg_len, DMA_TO_DEVICE);
}

static void usdhi6_dma_complete(void *arg)
{
	struct usdhi6_host *host = arg;
	struct mmc_request *mrq = host->mrq;

	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n",
		 dev_name(mmc_dev(host->mmc)), mrq))
		return;

	dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__,
		mrq->cmd->opcode);

	usdhi6_dma_stop_unmap(host);
	usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
}

static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan,
			    enum dma_transfer_direction dir)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	dma_cookie_t cookie = -EINVAL;
	enum dma_data_direction data_dir;
	int ret;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		data_dir = DMA_TO_DEVICE;
		break;
	case DMA_DEV_TO_MEM:
		data_dir = DMA_FROM_DEVICE;
		break;
	default:
		return -EINVAL;
	}

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret, dir,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = usdhi6_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
	}

	dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n",
		__func__, data->sg_len, ret, cookie, desc);

	if (cookie < 0) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = cookie;
		usdhi6_dma_release(host);
		dev_warn(mmc_dev(host->mmc),
			 "DMA failed: %d, falling back to PIO\n", ret);
	}

	return cookie;
}

static int usdhi6_dma_start(struct usdhi6_host *host)
{
	if (!host->chan_rx || !host->chan_tx)
		return -ENODEV;

	if (host->mrq->data->flags & MMC_DATA_READ)
		return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM);

	return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV);
}

static void usdhi6_dma_kill(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;

	dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n",
		__func__, data->sg_len, data->blocks, data->blksz);
	/* Abort DMA */
	if (data->flags & MMC_DATA_READ)
		dmaengine_terminate_sync(host->chan_rx);
	else
		dmaengine_terminate_sync(host->chan_tx);
}

static void usdhi6_dma_check_error(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;

	dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n",
		__func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1));

	if (host->io_error) {
		data->error = usdhi6_error_code(host);
		data->bytes_xfered = 0;
		usdhi6_dma_kill(host);
		usdhi6_dma_release(host);
		dev_warn(mmc_dev(host->mmc),
			 "DMA failed: %d, falling back to PIO\n", data->error);
		return;
	}

	/*
	 * The datasheet tells us to check a response from the card, whereas
	 * responses only come after the command phase, not after the data
	 * phase. Let's check anyway.
	 */
	if (host->irq_status & USDHI6_SD_INFO1_RSP_END)
		dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n");
}

static void usdhi6_dma_kick(struct usdhi6_host *host)
{
	if (host->mrq->data->flags & MMC_DATA_READ)
		dma_async_issue_pending(host->chan_rx);
	else
		dma_async_issue_pending(host->chan_tx);
}

static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
{
	struct dma_slave_config cfg = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	int ret;

	host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
	dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
		host->chan_tx);

	if (IS_ERR(host->chan_tx)) {
		host->chan_tx = NULL;
		return;
	}

	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = start + USDHI6_SD_BUF0;
	cfg.dst_maxburst = 128;	/* 128 words * 4 bytes = 512 bytes */
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(host->chan_tx, &cfg);
	if (ret < 0)
		goto e_release_tx;

	host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
	dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
		host->chan_rx);

	if (IS_ERR(host->chan_rx)) {
		host->chan_rx = NULL;
		goto e_release_tx;
	}

	cfg.direction = DMA_DEV_TO_MEM;
	cfg.src_addr = cfg.dst_addr;
	cfg.src_maxburst = 128;	/* 128 words * 4 bytes = 512 bytes */
	cfg.dst_addr = 0;
	ret = dmaengine_slave_config(host->chan_rx, &cfg);
	if (ret < 0)
		goto e_release_rx;

	return;

e_release_rx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
e_release_tx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}

/*			API helpers					*/

static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios)
{
	unsigned long rate = ios->clock;
	u32 val;
	unsigned int i;

	for (i = 1000; i; i--) {
		if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN)
			break;
		usleep_range(10, 100);
	}

	if (!i) {
		dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n");
		return;
	}

	val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK;

	if (rate) {
		unsigned long new_rate;

		if (host->imclk <= rate) {
			if (ios->timing != MMC_TIMING_UHS_DDR50) {
				/* Cannot have 1-to-1 clock in DDR mode */
				new_rate = host->imclk;
				val |= 0xff;
			} else {
				new_rate = host->imclk / 2;
			}
		} else {
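			/*
			 * Divider encoding, as derived from the code below:
			 * the low byte of SD_CLK_CTRL takes div >> 2 for
			 * power-of-2 dividers from 2 upwards (0x00 = /2,
			 * 0x01 = /4, ...), while 0xff above selects the
			 * undivided input clock.
			 */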
			unsigned long div =
				roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate));
			val |= div >> 2;
			new_rate = host->imclk / div;
		}

		if (host->rate == new_rate)
			return;

		host->rate = new_rate;

		dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n",
			rate, (val & 0xff) << 2, new_rate);
	}

	/*
	 * If the old or the new rate equals the input clock rate, the clock
	 * has to be switched off before the change and back on afterwards
	 */
	if (host->imclk == rate || host->imclk == host->rate || !rate)
		usdhi6_write(host, USDHI6_SD_CLK_CTRL,
			     val & ~USDHI6_SD_CLK_CTRL_SCLKEN);

	if (!rate) {
		host->rate = 0;
		return;
	}

	usdhi6_write(host, USDHI6_SD_CLK_CTRL, val);

	if (host->imclk == rate || host->imclk == host->rate ||
	    !(val & USDHI6_SD_CLK_CTRL_SCLKEN))
		usdhi6_write(host, USDHI6_SD_CLK_CTRL,
			     val | USDHI6_SD_CLK_CTRL_SCLKEN);
}

static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vmmc))
		/* Errors ignored... */
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
				      ios->power_mode ? ios->vdd : 0);
}

static int usdhi6_reset(struct usdhi6_host *host)
{
	int i;

	usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED);
	cpu_relax();
	usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET);
	for (i = 1000; i; i--)
		if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET)
			break;

	return i ? 0 : -ETIMEDOUT;
}

static void usdhi6_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct usdhi6_host *host = mmc_priv(mmc);
	u32 option, mode;
	int ret;

	dev_dbg(mmc_dev(mmc), "%uHz, OCR: %u, power %u, bus-width %u, timing %u\n",
		ios->clock, ios->vdd, ios->power_mode, ios->bus_width, ios->timing);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		usdhi6_set_power(host, ios);
		usdhi6_only_cd(host);
		break;
	case MMC_POWER_UP:
		/*
		 * The only other place we touch USDHI6_SD_OPTION is
		 * .request(), which cannot race with MMC_POWER_UP
		 */
		ret = usdhi6_reset(host);
		if (ret < 0) {
			dev_err(mmc_dev(mmc), "Cannot reset the interface!\n");
		} else {
			usdhi6_set_power(host, ios);
			usdhi6_only_cd(host);
		}
		break;
	case MMC_POWER_ON:
		option = usdhi6_read(host, USDHI6_SD_OPTION);
		/*
		 * The eMMC standard only allows 4 or 8 bits in the DDR mode,
		 * the same probably holds for SD cards. We check here anyway,
		 * since the datasheet explicitly requires 4 bits for DDR.
		 */
		if (ios->bus_width == MMC_BUS_WIDTH_1) {
			if (ios->timing == MMC_TIMING_UHS_DDR50)
				dev_err(mmc_dev(mmc),
					"4 bits are required for DDR\n");
			option |= USDHI6_SD_OPTION_WIDTH_1;
			mode = 0;
		} else {
			option &= ~USDHI6_SD_OPTION_WIDTH_1;
			mode = ios->timing == MMC_TIMING_UHS_DDR50;
		}
		usdhi6_write(host, USDHI6_SD_OPTION, option);
		usdhi6_write(host, USDHI6_SDIF_MODE, mode);
		break;
	}

	if (host->rate != ios->clock)
		usdhi6_clk_set(host, ios);
}

/* This is data timeout. Response timeout is fixed to 640 clock cycles */
static void usdhi6_timeout_set(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;
	u32 val;
	unsigned long ticks;

	if (!mrq->data)
		ticks = host->rate / 1000 * mrq->cmd->busy_timeout;
	else
		ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) +
			mrq->data->timeout_clks;

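	/*
	 * The 4-bit field programmed below selects a timeout of
	 * 2^(13 + val) ticks: val = 0 is the 2^13 minimum, val = 14 the
	 * 2^27 maximum, as derived from the order_base_2() mapping.
	 */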
	if (!ticks || ticks > 1 << 27)
		/* Max timeout */
		val = 14;
	else if (ticks < 1 << 13)
		/* Min timeout */
		val = 0;
	else
		val = order_base_2(ticks) - 13;

	dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n",
		mrq->data ? "data" : "cmd", ticks, host->rate);

	/* Timeout Counter mask: 0xf0 */
	usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) |
		     (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK));
}

static void usdhi6_request_done(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data = mrq->data;

	if (WARN(host->pg.page || host->head_pg.page,
		 "Page %p or %p not unmapped: wait %u, CMD%d(%c) @ +0x%zx %ux%u in SG%u!\n",
		 host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode,
		 data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-',
		 data ? host->offset : 0, data ? data->blocks : 0,
		 data ? data->blksz : 0, data ? data->sg_len : 0))
		usdhi6_sg_unmap(host, true);

	if (mrq->cmd->error ||
	    (data && data->error) ||
	    (mrq->stop && mrq->stop->error))
		dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n",
			__func__, mrq->cmd->opcode, data ? data->blocks : 0,
			data ? data->blksz : 0,
			mrq->cmd->error,
			data ? data->error : 1,
			mrq->stop ? mrq->stop->error : 1);

	/* Disable DMA */
	usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
	host->wait = USDHI6_WAIT_FOR_REQUEST;
	host->mrq = NULL;

	mmc_request_done(host->mmc, mrq);
}

static int usdhi6_cmd_flags(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd = mrq->cmd;
	u16 opc = cmd->opcode;

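	/* The previous command was CMD55: tag this one as an application command */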
	if (host->app_cmd) {
		host->app_cmd = false;
		opc |= USDHI6_SD_CMD_APP;
	}

	if (mrq->data) {
		opc |= USDHI6_SD_CMD_DATA;

		if (mrq->data->flags & MMC_DATA_READ)
			opc |= USDHI6_SD_CMD_READ;

		if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
		    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		    (cmd->opcode == SD_IO_RW_EXTENDED &&
		     mrq->data->blocks > 1)) {
			opc |= USDHI6_SD_CMD_MULTI;
			if (!mrq->stop)
				opc |= USDHI6_SD_CMD_CMD12_AUTO_OFF;
		}

		switch (mmc_resp_type(cmd)) {
		case MMC_RSP_NONE:
			opc |= USDHI6_SD_CMD_MODE_RSP_NONE;
			break;
		case MMC_RSP_R1:
			opc |= USDHI6_SD_CMD_MODE_RSP_R1;
			break;
		case MMC_RSP_R1B:
			opc |= USDHI6_SD_CMD_MODE_RSP_R1B;
			break;
		case MMC_RSP_R2:
			opc |= USDHI6_SD_CMD_MODE_RSP_R2;
			break;
		case MMC_RSP_R3:
			opc |= USDHI6_SD_CMD_MODE_RSP_R3;
			break;
		default:
			dev_warn(mmc_dev(host->mmc),
				 "Unknown response type %d\n",
				 mmc_resp_type(cmd));
			return -EINVAL;
		}
	}

	return opc;
}

static int usdhi6_rq_start(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	int opc = usdhi6_cmd_flags(host);
	int i;

	if (opc < 0)
		return opc;

	for (i = 1000; i; i--) {
		if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY))
			break;
		usleep_range(10, 100);
	}

	if (!i) {
		dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n");
		return -EAGAIN;
	}

	if (data) {
		bool use_dma;
		int ret = 0;

		host->page_idx = 0;

		if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) {
			switch (data->blksz) {
			case 512:
				break;
			case 32:
			case 64:
			case 128:
			case 256:
				if (mrq->stop)
					ret = -EINVAL;
				break;
			default:
				ret = -EINVAL;
			}
		} else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
			    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) &&
			   data->blksz != 512) {
			ret = -EINVAL;
		}

		if (ret < 0) {
			dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n",
				 __func__, data->blocks, data->blksz);
			return -EINVAL;
		}

		if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
		    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		    (cmd->opcode == SD_IO_RW_EXTENDED &&
		     data->blocks > 1))
			usdhi6_sg_prep(host);

		usdhi6_write(host, USDHI6_SD_SIZE, data->blksz);

		if ((data->blksz >= USDHI6_MIN_DMA ||
		     data->blocks > 1) &&
		    (data->blksz % 4 ||
		     data->sg->offset % 4))
			dev_dbg(mmc_dev(host->mmc),
				"Bad SG of %u: %ux%u @ %u\n", data->sg_len,
				data->blksz, data->blocks, data->sg->offset);

		/* Enable DMA for USDHI6_MIN_DMA bytes or more */
		use_dma = data->blksz >= USDHI6_MIN_DMA &&
			!(data->blksz % 4) &&
			usdhi6_dma_start(host) >= DMA_MIN_COOKIE;

		if (use_dma)
			usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW);

		dev_dbg(mmc_dev(host->mmc),
			"%s(): request opcode %u, %u blocks of %u bytes in %u segments, %s %s @+0x%x%s\n",
			__func__, cmd->opcode, data->blocks, data->blksz,
			data->sg_len, use_dma ? "DMA" : "PIO",
			data->flags & MMC_DATA_READ ? "read" : "write",
			data->sg->offset, mrq->stop ? " + stop" : "");
	} else {
		dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n",
			__func__, cmd->opcode);
	}

	/* We have to get a command completion interrupt with DMA too */
	usdhi6_wait_for_resp(host);

	host->wait = USDHI6_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);

	/* SEC bit is required to enable block counting by the core */
	usdhi6_write(host, USDHI6_SD_STOP,
		     data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0);
	usdhi6_write(host, USDHI6_SD_ARG, cmd->arg);

	/* Kick command execution */
	usdhi6_write(host, USDHI6_SD_CMD, opc);

	return 0;
}

static void usdhi6_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct usdhi6_host *host = mmc_priv(mmc);
	int ret;

	cancel_delayed_work_sync(&host->timeout_work);

	host->mrq = mrq;
	host->sg = NULL;

	usdhi6_timeout_set(host);
	ret = usdhi6_rq_start(host);
	if (ret < 0) {
		mrq->cmd->error = ret;
		usdhi6_request_done(host);
	}
}

static int usdhi6_get_cd(struct mmc_host *mmc)
{
	struct usdhi6_host *host = mmc_priv(mmc);
	/* Read is atomic, no need to lock */
	u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD;

/*
 *	level	status.CD	CD_ACTIVE_HIGH	card present
 *	1	0		0		0
 *	1	0		1		1
 *	0	1		0		1
 *	0	1		1		0
 */
	return !status ^ !(mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
}

static int usdhi6_get_ro(struct mmc_host *mmc)
{
	struct usdhi6_host *host = mmc_priv(mmc);
	/* No locking as above */
	u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP;

/*
 *	level	status.WP	RO_ACTIVE_HIGH	card read-only
 *	1	0		0		0
 *	1	0		1		1
 *	0	1		0		1
 *	0	1		1		0
 */
	return !status ^ !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
}

static void usdhi6_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct usdhi6_host *host = mmc_priv(mmc);

	dev_dbg(mmc_dev(mmc), "%s(): %sable\n", __func__, enable ? "en" : "dis");

	if (enable) {
		host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ;
		usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask);
		usdhi6_write(host, USDHI6_SDIO_MODE, 1);
	} else {
		usdhi6_write(host, USDHI6_SDIO_MODE, 0);
		usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ);
		host->sdio_mask = USDHI6_SDIO_INFO1_IRQ;
	}
}

static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage)
{
	if (IS_ERR(host->pins_uhs))
		return 0;

	switch (voltage) {
	case MMC_SIGNAL_VOLTAGE_180:
	case MMC_SIGNAL_VOLTAGE_120:
		return pinctrl_select_state(host->pinctrl,
					    host->pins_uhs);

	default:
		return pinctrl_select_default_state(mmc_dev(host->mmc));
	}
}

static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret;

	ret = mmc_regulator_set_vqmmc(mmc, ios);
	if (ret < 0)
		return ret;

	ret = usdhi6_set_pinstates(mmc_priv(mmc), ios->signal_voltage);
	if (ret)
		dev_warn_once(mmc_dev(mmc),
			      "Failed to set pinstate err=%d\n", ret);
	return ret;
}

static int usdhi6_card_busy(struct mmc_host *mmc)
{
	struct usdhi6_host *host = mmc_priv(mmc);
	u32 tmp = usdhi6_read(host, USDHI6_SD_INFO2);

	/* Card is busy if it is pulling dat[0] low */
	return !(tmp & USDHI6_SD_INFO2_SDDAT0);
}

static const struct mmc_host_ops usdhi6_ops = {
	.request	= usdhi6_request,
	.set_ios	= usdhi6_set_ios,
	.get_cd		= usdhi6_get_cd,
	.get_ro		= usdhi6_get_ro,
	.enable_sdio_irq = usdhi6_enable_sdio_irq,
	.start_signal_voltage_switch = usdhi6_sig_volt_switch,
	.card_busy = usdhi6_card_busy,
};

/*			State machine handlers				*/

static void usdhi6_resp_cmd12(struct usdhi6_host *host)
{
	struct mmc_command *cmd = host->mrq->stop;
	cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
}

static void usdhi6_resp_read(struct usdhi6_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	u32 *rsp = cmd->resp, tmp = 0;
	int i;

/*
 * RSP10	39-8
 * RSP32	71-40
 * RSP54	103-72
 * RSP76	127-104
 * R2-type response:
 * resp[0]	= r[127..96]
 * resp[1]	= r[95..64]
 * resp[2]	= r[63..32]
 * resp[3]	= r[31..0]
 * Other responses:
 * resp[0]	= r[39..8]
 */

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		return;

	if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) {
		dev_err(mmc_dev(host->mmc),
			"CMD%d: response expected but is missing!\n", cmd->opcode);
		return;
	}

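	/*
	 * For 136-bit (R2) responses the hardware strips the CRC byte: bits
	 * 127..8 sit in RSP76..RSP10, so each resp word is shifted left by 8
	 * and its low byte taken from the top of the next lower register.
	 */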
	if (mmc_resp_type(cmd) & MMC_RSP_136)
		for (i = 0; i < 4; i++) {
			if (i)
				rsp[3 - i] = tmp >> 24;
			tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8);
			rsp[3 - i] |= tmp << 8;
		}
	else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
		 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
		/* Read RSP54 to avoid conflict with auto CMD12 */
		rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54);
	else
		rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10);

	dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]);
}

static int usdhi6_blk_read(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p;
	int i, rest;

	if (host->io_error) {
		data->error = usdhi6_error_code(host);
		goto error;
	}

	if (host->pg.page) {
		p = host->blk_page + host->offset;
	} else {
		p = usdhi6_sg_map(host);
		if (!p) {
			data->error = -ENOMEM;
			goto error;
		}
	}

	for (i = 0; i < data->blksz / 4; i++, p++)
		*p = usdhi6_read(host, USDHI6_SD_BUF0);

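	/*
	 * A block size that isn't a multiple of 4 leaves 1-3 bytes in the
	 * FIFO: drain them with 16-bit reads
	 */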
	rest = data->blksz % 4;
	for (i = 0; i < (rest + 1) / 2; i++) {
		u16 d = usdhi6_read16(host, USDHI6_SD_BUF0);
		((u8 *)p)[2 * i] = ((u8 *)&d)[0];
		if (rest > 1 && !i)
			((u8 *)p)[2 * i + 1] = ((u8 *)&d)[1];
	}

	return 0;

error:
	dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
	host->wait = USDHI6_WAIT_FOR_REQUEST;
	return data->error;
}

static int usdhi6_blk_write(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p;
	int i, rest;

	if (host->io_error) {
		data->error = usdhi6_error_code(host);
		goto error;
	}

	if (host->pg.page) {
		p = host->blk_page + host->offset;
	} else {
		p = usdhi6_sg_map(host);
		if (!p) {
			data->error = -ENOMEM;
			goto error;
		}
	}

	for (i = 0; i < data->blksz / 4; i++, p++)
		usdhi6_write(host, USDHI6_SD_BUF0, *p);

	rest = data->blksz % 4;
	for (i = 0; i < (rest + 1) / 2; i++) {
		u16 d;
		((u8 *)&d)[0] = ((u8 *)p)[2 * i];
		if (rest > 1 && !i)
			((u8 *)&d)[1] = ((u8 *)p)[2 * i + 1];
		else
			((u8 *)&d)[1] = 0;
		usdhi6_write16(host, USDHI6_SD_BUF0, d);
	}

	return 0;

error:
	dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
	host->wait = USDHI6_WAIT_FOR_REQUEST;
	return data->error;
}

static int usdhi6_stop_cmd(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;

	switch (mrq->cmd->opcode) {
	case MMC_READ_MULTIPLE_BLOCK:
	case MMC_WRITE_MULTIPLE_BLOCK:
		if (mrq->stop->opcode == MMC_STOP_TRANSMISSION) {
			host->wait = USDHI6_WAIT_FOR_STOP;
			return 0;
		}
		fallthrough;	/* Unsupported STOP command */
	default:
		dev_err(mmc_dev(host->mmc),
			"unsupported stop CMD%d for CMD%d\n",
			mrq->stop->opcode, mrq->cmd->opcode);
		mrq->stop->error = -EOPNOTSUPP;
	}

	return -EOPNOTSUPP;
}

static bool usdhi6_end_cmd(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd = mrq->cmd;

	if (host->io_error) {
		cmd->error = usdhi6_error_code(host);
		return false;
	}

	usdhi6_resp_read(host);

	if (!mrq->data)
		return false;

	if (host->dma_active) {
		usdhi6_dma_kick(host);
		if (!mrq->stop)
			host->wait = USDHI6_WAIT_FOR_DMA;
		else if (usdhi6_stop_cmd(host) < 0)
			return false;
	} else if (mrq->data->flags & MMC_DATA_READ) {
		if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
		    (cmd->opcode == SD_IO_RW_EXTENDED &&
		     mrq->data->blocks > 1))
			host->wait = USDHI6_WAIT_FOR_MREAD;
		else
			host->wait = USDHI6_WAIT_FOR_READ;
	} else {
		if (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		    (cmd->opcode == SD_IO_RW_EXTENDED &&
		     mrq->data->blocks > 1))
			host->wait = USDHI6_WAIT_FOR_MWRITE;
		else
			host->wait = USDHI6_WAIT_FOR_WRITE;
	}

	return true;
}

static bool usdhi6_read_block(struct usdhi6_host *host)
{
	/* ACCESS_END IRQ is already unmasked */
	int ret = usdhi6_blk_read(host);

	/*
	 * Have to force unmapping both pages: the single block could have been
	 * cross-page, in which case for single-block IO host->page_idx == 0.
	 * So, if we don't force, the second page won't be unmapped.
	 */
	usdhi6_sg_unmap(host, true);

	if (ret < 0)
		return false;

	host->wait = USDHI6_WAIT_FOR_DATA_END;
	return true;
}

static bool usdhi6_mread_block(struct usdhi6_host *host)
{
	int ret = usdhi6_blk_read(host);

	if (ret < 0)
		return false;

	usdhi6_sg_advance(host);

	return !host->mrq->data->error &&
		(host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
}

static bool usdhi6_write_block(struct usdhi6_host *host)
{
	int ret = usdhi6_blk_write(host);

	/* See comment in usdhi6_read_block() */
	usdhi6_sg_unmap(host, true);

	if (ret < 0)
		return false;

	host->wait = USDHI6_WAIT_FOR_DATA_END;
	return true;
}

static bool usdhi6_mwrite_block(struct usdhi6_host *host)
{
	int ret = usdhi6_blk_write(host);

	if (ret < 0)
		return false;

	usdhi6_sg_advance(host);

	return !host->mrq->data->error &&
		(host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
}

/*			Interrupt & timeout handlers			*/

static irqreturn_t usdhi6_sd_bh(int irq, void *dev_id)
{
	struct usdhi6_host *host = dev_id;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_data *data;
	bool io_wait = false;

	cancel_delayed_work_sync(&host->timeout_work);

	mrq = host->mrq;
	if (!mrq)
		return IRQ_HANDLED;

	cmd = mrq->cmd;
	data = mrq->data;

	switch (host->wait) {
	case USDHI6_WAIT_FOR_REQUEST:
		/* We're too late, the timeout has already kicked in */
		return IRQ_HANDLED;
	case USDHI6_WAIT_FOR_CMD:
		/* Wait for data? */
		io_wait = usdhi6_end_cmd(host);
		break;
	case USDHI6_WAIT_FOR_MREAD:
		/* Wait for more data? */
		io_wait = usdhi6_mread_block(host);
		break;
	case USDHI6_WAIT_FOR_READ:
		/* Wait for data end? */
		io_wait = usdhi6_read_block(host);
		break;
	case USDHI6_WAIT_FOR_MWRITE:
		/* Wait data to write? */
		io_wait = usdhi6_mwrite_block(host);
		break;
	case USDHI6_WAIT_FOR_WRITE:
		/* Wait for data end? */
		io_wait = usdhi6_write_block(host);
		break;
	case USDHI6_WAIT_FOR_DMA:
		usdhi6_dma_check_error(host);
		break;
	case USDHI6_WAIT_FOR_STOP:
		usdhi6_write(host, USDHI6_SD_STOP, 0);
		if (host->io_error) {
			int ret = usdhi6_error_code(host);
			if (mrq->stop)
				mrq->stop->error = ret;
			else
				mrq->data->error = ret;
			dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret);
			break;
		}
		usdhi6_resp_cmd12(host);
		mrq->stop->error = 0;
		break;
	case USDHI6_WAIT_FOR_DATA_END:
		if (host->io_error) {
			mrq->data->error = usdhi6_error_code(host);
			dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__,
				 mrq->data->error);
		}
		break;
	default:
		cmd->error = -EFAULT;
		dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
		usdhi6_request_done(host);
		return IRQ_HANDLED;
	}

	if (io_wait) {
		schedule_delayed_work(&host->timeout_work, host->timeout);
		/* Wait for more data or ACCESS_END */
		if (!host->dma_active)
			usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
		return IRQ_HANDLED;
	}

	if (!cmd->error) {
		if (data) {
			if (!data->error) {
				if (host->wait != USDHI6_WAIT_FOR_STOP &&
				    host->mrq->stop &&
				    !host->mrq->stop->error &&
				    !usdhi6_stop_cmd(host)) {
					/* Sending STOP */
					usdhi6_wait_for_resp(host);

					schedule_delayed_work(&host->timeout_work,
							      host->timeout);

					return IRQ_HANDLED;
				}

				data->bytes_xfered = data->blocks * data->blksz;
			} else {
				/* Data error: might need to unmap the last page */
				dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n",
					 __func__, data->error);
				usdhi6_sg_unmap(host, true);
			}
		} else if (cmd->opcode == MMC_APP_CMD) {
			host->app_cmd = true;
		}
	}

	usdhi6_request_done(host);

	return IRQ_HANDLED;
}

static irqreturn_t usdhi6_sd(int irq, void *dev_id)
{
	struct usdhi6_host *host = dev_id;
	u16 status, status2, error;

	status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
		~USDHI6_SD_INFO1_CARD;
	status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask;

	usdhi6_only_cd(host);

	dev_dbg(mmc_dev(host->mmc),
		"IRQ status = 0x%08x, status2 = 0x%08x\n", status, status2);

	if (!status && !status2)
		return IRQ_NONE;

	error = status2 & USDHI6_SD_INFO2_ERR;

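	/*
	 * The INFO flag bits appear to be write-zero-to-clear: writing 0xffff
	 * with only the handled bits zeroed acks those bits and leaves the
	 * others untouched.
	 */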
	/* Ack / clear interrupts */
	if (USDHI6_SD_INFO1_IRQ & status)
		usdhi6_write(host, USDHI6_SD_INFO1,
			     0xffff & ~(USDHI6_SD_INFO1_IRQ & status));

	if (USDHI6_SD_INFO2_IRQ & status2) {
		if (error)
			/* In error cases BWE and BRE aren't cleared automatically */
			status2 |= USDHI6_SD_INFO2_BWE | USDHI6_SD_INFO2_BRE;

		usdhi6_write(host, USDHI6_SD_INFO2,
			     0xffff & ~(USDHI6_SD_INFO2_IRQ & status2));
	}

	host->io_error = error;
	host->irq_status = status;

	if (error) {
		/* Don't pollute the log with unsupported command timeouts */
		if (host->wait != USDHI6_WAIT_FOR_CMD ||
		    error != USDHI6_SD_INFO2_RSP_TOUT)
			dev_warn(mmc_dev(host->mmc),
				 "%s(): INFO2 error bits 0x%08x\n",
				 __func__, error);
		else
			dev_dbg(mmc_dev(host->mmc),
				"%s(): INFO2 error bits 0x%08x\n",
				__func__, error);
	}

	return IRQ_WAKE_THREAD;
}

static irqreturn_t usdhi6_sdio(int irq, void *dev_id)
{
	struct usdhi6_host *host = dev_id;
	u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask;

	dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status);

	if (!status)
		return IRQ_NONE;

	usdhi6_write(host, USDHI6_SDIO_INFO1, ~status);

	mmc_signal_sdio_irq(host->mmc);

	return IRQ_HANDLED;
}

static irqreturn_t usdhi6_cd(int irq, void *dev_id)
{
	struct usdhi6_host *host = dev_id;
	struct mmc_host *mmc = host->mmc;
	u16 status;

	/* We're only interested in hotplug events here */
	status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
		USDHI6_SD_INFO1_CARD;

	if (!status)
		return IRQ_NONE;

	/* Ack */
	usdhi6_write(host, USDHI6_SD_INFO1, ~status);

	if (!work_pending(&mmc->detect.work) &&
	    (((status & USDHI6_SD_INFO1_CARD_INSERT) &&
	      !mmc->card) ||
	     ((status & USDHI6_SD_INFO1_CARD_EJECT) &&
	      mmc->card)))
		mmc_detect_change(mmc, msecs_to_jiffies(100));

	return IRQ_HANDLED;
}

/*
 * Actually this should not be needed, provided the built-in timeout works
 * reliably in both PIO cases and DMA never fails. But if DMA does fail, a
 * timeout handler might be the only way to catch the error.
 */
static void usdhi6_timeout_work(struct work_struct *work)
{
	struct delayed_work *d = to_delayed_work(work);
	struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data = mrq ? mrq->data : NULL;
	struct scatterlist *sg;

	dev_warn(mmc_dev(host->mmc),
		 "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
		 host->dma_active ? "DMA" : "PIO",
		 host->wait, mrq ? mrq->cmd->opcode : -1,
		 usdhi6_read(host, USDHI6_SD_INFO1),
		 usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status);

	if (host->dma_active) {
		usdhi6_dma_kill(host);
		usdhi6_dma_stop_unmap(host);
	}

	switch (host->wait) {
	default:
		dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
		fallthrough;	/* mrq can be NULL, although that should be impossible here */
	case USDHI6_WAIT_FOR_CMD:
		usdhi6_error_code(host);
		if (mrq)
			mrq->cmd->error = -ETIMEDOUT;
		break;
	case USDHI6_WAIT_FOR_STOP:
		usdhi6_error_code(host);
		mrq->stop->error = -ETIMEDOUT;
		break;
	case USDHI6_WAIT_FOR_DMA:
	case USDHI6_WAIT_FOR_MREAD:
	case USDHI6_WAIT_FOR_MWRITE:
	case USDHI6_WAIT_FOR_READ:
	case USDHI6_WAIT_FOR_WRITE:
		sg = host->sg ?: data->sg;
		dev_dbg(mmc_dev(host->mmc),
			"%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
			data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
			host->offset, data->blocks, data->blksz, data->sg_len,
			sg_dma_len(sg), sg->offset);
		usdhi6_sg_unmap(host, true);
		fallthrough;	/* page unmapped in USDHI6_WAIT_FOR_DATA_END */
	case USDHI6_WAIT_FOR_DATA_END:
		usdhi6_error_code(host);
		data->error = -ETIMEDOUT;
	}

	if (mrq)
		usdhi6_request_done(host);
}

/*			 Probe / release				*/

static const struct of_device_id usdhi6_of_match[] = {
	{.compatible = "renesas,usdhi6rol0"},
	{}
};
MODULE_DEVICE_TABLE(of, usdhi6_of_match);

static int usdhi6_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mmc_host *mmc;
	struct usdhi6_host *host;
	struct resource *res;
	int irq_cd, irq_sd, irq_sdio;
	u32 version;
	int ret;

	if (!dev->of_node)
		return -ENODEV;

	irq_cd = platform_get_irq_byname(pdev, "card detect");
	irq_sd = platform_get_irq_byname(pdev, "data");
	irq_sdio = platform_get_irq_byname(pdev, "SDIO");
	if (irq_sd < 0 || irq_sdio < 0)
		return -ENODEV;

	mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto e_free_mmc;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto e_free_mmc;

	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->wait	= USDHI6_WAIT_FOR_REQUEST;
	host->timeout	= msecs_to_jiffies(USDHI6_REQ_TIMEOUT_MS);
	/*
	 * We use a fixed timeout of 4s, hence inform the core about it. A
	 * future improvement should instead respect the cmd->busy_timeout.
	 */
	mmc->max_busy_timeout = USDHI6_REQ_TIMEOUT_MS;

	host->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(host->pinctrl)) {
		ret = PTR_ERR(host->pinctrl);
		goto e_free_mmc;
	}

	host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto e_free_mmc;
	}

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto e_free_mmc;
	}

	host->imclk = clk_get_rate(host->clk);

	ret = clk_prepare_enable(host->clk);
	if (ret < 0)
		goto e_free_mmc;

	version = usdhi6_read(host, USDHI6_VERSION);
	if ((version & 0xfff) != 0xa0d) {
		ret = -EPERM;
		dev_err(dev, "Version not recognized %x\n", version);
		goto e_clk_off;
	}

	dev_info(dev, "A USDHI6ROL0 SD host detected with %d ports\n",
		 usdhi6_read(host, USDHI6_SD_PORT_SEL) >> USDHI6_SD_PORT_SEL_PORTS_SHIFT);

	usdhi6_mask_all(host);

	if (irq_cd >= 0) {
		ret = devm_request_irq(dev, irq_cd, usdhi6_cd, 0,
				       dev_name(dev), host);
		if (ret < 0)
			goto e_clk_off;
	} else {
		mmc->caps |= MMC_CAP_NEEDS_POLL;
	}

	ret = devm_request_threaded_irq(dev, irq_sd, usdhi6_sd, usdhi6_sd_bh, 0,
			       dev_name(dev), host);
	if (ret < 0)
		goto e_clk_off;

	ret = devm_request_irq(dev, irq_sdio, usdhi6_sdio, 0,
			       dev_name(dev), host);
	if (ret < 0)
		goto e_clk_off;

	INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work);

	usdhi6_dma_request(host, res->start);

	mmc->ops = &usdhi6_ops;
	mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
		     MMC_CAP_SDIO_IRQ;
	/* Set .max_segs to an arbitrary value. Feel free to adjust. */
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	/*
	 * Setting .max_seg_size to 1 page would simplify our page-mapping code,
	 * but OTOH having large segments makes DMA more efficient. We could
	 * check whether we managed to get DMA and fall back to 1-page segments,
	 * but if we do manage to obtain DMA and then it fails at run-time and
	 * we fall back to PIO, we will continue getting large segments. So, we
	 * wouldn't be able to get rid of the code anyway.
	 */
	mmc->max_seg_size = mmc->max_req_size;
	if (!mmc->f_max)
		mmc->f_max = host->imclk;
	mmc->f_min = host->imclk / 512;

	platform_set_drvdata(pdev, host);

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto e_release_dma;

	return 0;

e_release_dma:
	usdhi6_dma_release(host);
e_clk_off:
	clk_disable_unprepare(host->clk);
e_free_mmc:
	mmc_free_host(mmc);

	return ret;
}

static int usdhi6_remove(struct platform_device *pdev)
{
	struct usdhi6_host *host = platform_get_drvdata(pdev);

	mmc_remove_host(host->mmc);

	usdhi6_mask_all(host);
	cancel_delayed_work_sync(&host->timeout_work);
	usdhi6_dma_release(host);
	clk_disable_unprepare(host->clk);
	mmc_free_host(host->mmc);

	return 0;
}

static struct platform_driver usdhi6_driver = {
	.probe		= usdhi6_probe,
	.remove		= usdhi6_remove,
	.driver		= {
		.name	= "usdhi6rol0",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = usdhi6_of_match,
	},
};

module_platform_driver(usdhi6_driver);

MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:usdhi6rol0");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");