cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sdhci.c (129607B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
      4 *
      5 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
      6 *
      7 * Thanks to the following companies for their support:
      8 *
      9 *     - JMicron (hardware and technical support)
     10 */
     11
     12#include <linux/bitfield.h>
     13#include <linux/delay.h>
     14#include <linux/dmaengine.h>
     15#include <linux/ktime.h>
     16#include <linux/highmem.h>
     17#include <linux/io.h>
     18#include <linux/module.h>
     19#include <linux/dma-mapping.h>
     20#include <linux/slab.h>
     21#include <linux/scatterlist.h>
     22#include <linux/sizes.h>
     23#include <linux/regulator/consumer.h>
     24#include <linux/pm_runtime.h>
     25#include <linux/of.h>
     26
     27#include <linux/leds.h>
     28
     29#include <linux/mmc/mmc.h>
     30#include <linux/mmc/host.h>
     31#include <linux/mmc/card.h>
     32#include <linux/mmc/sdio.h>
     33#include <linux/mmc/slot-gpio.h>
     34
     35#include "sdhci.h"
     36
     37#define DRIVER_NAME "sdhci"
     38
     39#define DBG(f, x...) \
     40	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
     41
     42#define SDHCI_DUMP(f, x...) \
     43	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
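        /*
         * Note that both DBG() and SDHCI_DUMP() expect a 'struct sdhci_host *host'
         * variable to be in scope at the call site.
         */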
     44
     45#define MAX_TUNING_LOOP 40
     46
     47static unsigned int debug_quirks = 0;
     48static unsigned int debug_quirks2;
     49
     50static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
     51
     52static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);
     53
     54void sdhci_dumpregs(struct sdhci_host *host)
     55{
     56	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
     57
     58	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
     59		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
     60		   sdhci_readw(host, SDHCI_HOST_VERSION));
     61	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
     62		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
     63		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
     64	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
     65		   sdhci_readl(host, SDHCI_ARGUMENT),
     66		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
     67	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
     68		   sdhci_readl(host, SDHCI_PRESENT_STATE),
     69		   sdhci_readb(host, SDHCI_HOST_CONTROL));
     70	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
     71		   sdhci_readb(host, SDHCI_POWER_CONTROL),
     72		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
     73	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
     74		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
     75		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
     76	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
     77		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
     78		   sdhci_readl(host, SDHCI_INT_STATUS));
     79	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
     80		   sdhci_readl(host, SDHCI_INT_ENABLE),
     81		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
     82	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
     83		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
     84		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
     85	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
     86		   sdhci_readl(host, SDHCI_CAPABILITIES),
     87		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
     88	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
     89		   sdhci_readw(host, SDHCI_COMMAND),
     90		   sdhci_readl(host, SDHCI_MAX_CURRENT));
     91	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
     92		   sdhci_readl(host, SDHCI_RESPONSE),
     93		   sdhci_readl(host, SDHCI_RESPONSE + 4));
     94	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
     95		   sdhci_readl(host, SDHCI_RESPONSE + 8),
     96		   sdhci_readl(host, SDHCI_RESPONSE + 12));
     97	SDHCI_DUMP("Host ctl2: 0x%08x\n",
     98		   sdhci_readw(host, SDHCI_HOST_CONTROL2));
     99
    100	if (host->flags & SDHCI_USE_ADMA) {
    101		if (host->flags & SDHCI_USE_64_BIT_DMA) {
    102			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
    103				   sdhci_readl(host, SDHCI_ADMA_ERROR),
    104				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
    105				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
    106		} else {
    107			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
    108				   sdhci_readl(host, SDHCI_ADMA_ERROR),
    109				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
    110		}
    111	}
    112
    113	if (host->ops->dump_vendor_regs)
    114		host->ops->dump_vendor_regs(host);
    115
    116	SDHCI_DUMP("============================================\n");
    117}
    118EXPORT_SYMBOL_GPL(sdhci_dumpregs);
    119
    120/*****************************************************************************\
    121 *                                                                           *
    122 * Low level functions                                                       *
    123 *                                                                           *
    124\*****************************************************************************/
    125
    126static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
    127{
    128	u16 ctrl2;
    129
    130	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
    131	if (ctrl2 & SDHCI_CTRL_V4_MODE)
    132		return;
    133
    134	ctrl2 |= SDHCI_CTRL_V4_MODE;
    135	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
    136}
    137
    138/*
    139 * This can be called before sdhci_add_host() by Vendor's host controller
    140 * driver to enable v4 mode if supported.
    141 */
    142void sdhci_enable_v4_mode(struct sdhci_host *host)
    143{
    144	host->v4_mode = true;
    145	sdhci_do_enable_v4_mode(host);
    146}
    147EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
    148
    149static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
    150{
    151	return cmd->data || cmd->flags & MMC_RSP_BUSY;
    152}
    153
    154static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
    155{
    156	u32 present;
    157
    158	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
    159	    !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
    160		return;
    161
    162	if (enable) {
    163		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
    164				      SDHCI_CARD_PRESENT;
    165
    166		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
    167				       SDHCI_INT_CARD_INSERT;
    168	} else {
    169		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
    170	}
    171
    172	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
    173	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
    174}
    175
    176static void sdhci_enable_card_detection(struct sdhci_host *host)
    177{
    178	sdhci_set_card_detection(host, true);
    179}
    180
    181static void sdhci_disable_card_detection(struct sdhci_host *host)
    182{
    183	sdhci_set_card_detection(host, false);
    184}
    185
    186static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
    187{
    188	if (host->bus_on)
    189		return;
    190	host->bus_on = true;
    191	pm_runtime_get_noresume(mmc_dev(host->mmc));
    192}
    193
    194static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
    195{
    196	if (!host->bus_on)
    197		return;
    198	host->bus_on = false;
    199	pm_runtime_put_noidle(mmc_dev(host->mmc));
    200}
    201
    202void sdhci_reset(struct sdhci_host *host, u8 mask)
    203{
    204	ktime_t timeout;
    205
    206	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
    207
    208	if (mask & SDHCI_RESET_ALL) {
    209		host->clock = 0;
    210		/* Reset-all turns off SD Bus Power */
    211		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
    212			sdhci_runtime_pm_bus_off(host);
    213	}
    214
    215	/* Wait max 100 ms */
    216	timeout = ktime_add_ms(ktime_get(), 100);
    217
    218	/* hw clears the bit when it's done */
    219	while (1) {
    220		bool timedout = ktime_after(ktime_get(), timeout);
    221
    222		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
    223			break;
    224		if (timedout) {
    225			pr_err("%s: Reset 0x%x never completed.\n",
    226				mmc_hostname(host->mmc), (int)mask);
    227			sdhci_dumpregs(host);
    228			return;
    229		}
    230		udelay(10);
    231	}
    232}
    233EXPORT_SYMBOL_GPL(sdhci_reset);
    234
    235static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
    236{
    237	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
    238		struct mmc_host *mmc = host->mmc;
    239
    240		if (!mmc->ops->get_cd(mmc))
    241			return;
    242	}
    243
    244	host->ops->reset(host, mask);
    245
    246	if (mask & SDHCI_RESET_ALL) {
    247		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
    248			if (host->ops->enable_dma)
    249				host->ops->enable_dma(host);
    250		}
    251
     252		/* Resetting the controller clears many settings */
    253		host->preset_enabled = false;
    254	}
    255}
    256
    257static void sdhci_set_default_irqs(struct sdhci_host *host)
    258{
    259	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
    260		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
    261		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
    262		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
    263		    SDHCI_INT_RESPONSE;
    264
    265	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
    266	    host->tuning_mode == SDHCI_TUNING_MODE_3)
    267		host->ier |= SDHCI_INT_RETUNE;
    268
    269	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
    270	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
    271}
    272
    273static void sdhci_config_dma(struct sdhci_host *host)
    274{
    275	u8 ctrl;
    276	u16 ctrl2;
    277
    278	if (host->version < SDHCI_SPEC_200)
    279		return;
    280
    281	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
    282
    283	/*
    284	 * Always adjust the DMA selection as some controllers
    285	 * (e.g. JMicron) can't do PIO properly when the selection
    286	 * is ADMA.
    287	 */
    288	ctrl &= ~SDHCI_CTRL_DMA_MASK;
    289	if (!(host->flags & SDHCI_REQ_USE_DMA))
    290		goto out;
    291
    292	/* Note if DMA Select is zero then SDMA is selected */
    293	if (host->flags & SDHCI_USE_ADMA)
    294		ctrl |= SDHCI_CTRL_ADMA32;
    295
    296	if (host->flags & SDHCI_USE_64_BIT_DMA) {
    297		/*
     298		 * In v4 mode, any supported DMA mode can use 64-bit addressing
     299		 * if the controller supports 64-bit system addresses; otherwise
     300		 * only ADMA can use 64-bit addressing.
    301		 */
    302		if (host->v4_mode) {
    303			ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
    304			ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
    305			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
    306		} else if (host->flags & SDHCI_USE_ADMA) {
    307			/*
    308			 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
    309			 * set SDHCI_CTRL_ADMA64.
    310			 */
    311			ctrl |= SDHCI_CTRL_ADMA64;
    312		}
    313	}
    314
    315out:
    316	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
    317}
    318
    319static void sdhci_init(struct sdhci_host *host, int soft)
    320{
    321	struct mmc_host *mmc = host->mmc;
    322	unsigned long flags;
    323
    324	if (soft)
    325		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
    326	else
    327		sdhci_do_reset(host, SDHCI_RESET_ALL);
    328
    329	if (host->v4_mode)
    330		sdhci_do_enable_v4_mode(host);
    331
    332	spin_lock_irqsave(&host->lock, flags);
    333	sdhci_set_default_irqs(host);
    334	spin_unlock_irqrestore(&host->lock, flags);
    335
    336	host->cqe_on = false;
    337
    338	if (soft) {
    339		/* force clock reconfiguration */
    340		host->clock = 0;
    341		mmc->ops->set_ios(mmc, &mmc->ios);
    342	}
    343}
    344
    345static void sdhci_reinit(struct sdhci_host *host)
    346{
    347	u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
    348
    349	sdhci_init(host, 0);
    350	sdhci_enable_card_detection(host);
    351
    352	/*
    353	 * A change to the card detect bits indicates a change in present state,
     354	 * see sdhci_set_card_detection(). A card detect interrupt might have
    355	 * been missed while the host controller was being reset, so trigger a
    356	 * rescan to check.
    357	 */
    358	if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
    359		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
    360}
    361
    362static void __sdhci_led_activate(struct sdhci_host *host)
    363{
    364	u8 ctrl;
    365
    366	if (host->quirks & SDHCI_QUIRK_NO_LED)
    367		return;
    368
    369	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
    370	ctrl |= SDHCI_CTRL_LED;
    371	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
    372}
    373
    374static void __sdhci_led_deactivate(struct sdhci_host *host)
    375{
    376	u8 ctrl;
    377
    378	if (host->quirks & SDHCI_QUIRK_NO_LED)
    379		return;
    380
    381	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
    382	ctrl &= ~SDHCI_CTRL_LED;
    383	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
    384}
    385
    386#if IS_REACHABLE(CONFIG_LEDS_CLASS)
    387static void sdhci_led_control(struct led_classdev *led,
    388			      enum led_brightness brightness)
    389{
    390	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
    391	unsigned long flags;
    392
    393	spin_lock_irqsave(&host->lock, flags);
    394
    395	if (host->runtime_suspended)
    396		goto out;
    397
    398	if (brightness == LED_OFF)
    399		__sdhci_led_deactivate(host);
    400	else
    401		__sdhci_led_activate(host);
    402out:
    403	spin_unlock_irqrestore(&host->lock, flags);
    404}
    405
    406static int sdhci_led_register(struct sdhci_host *host)
    407{
    408	struct mmc_host *mmc = host->mmc;
    409
    410	if (host->quirks & SDHCI_QUIRK_NO_LED)
    411		return 0;
    412
    413	snprintf(host->led_name, sizeof(host->led_name),
    414		 "%s::", mmc_hostname(mmc));
    415
    416	host->led.name = host->led_name;
    417	host->led.brightness = LED_OFF;
    418	host->led.default_trigger = mmc_hostname(mmc);
    419	host->led.brightness_set = sdhci_led_control;
    420
    421	return led_classdev_register(mmc_dev(mmc), &host->led);
    422}
    423
    424static void sdhci_led_unregister(struct sdhci_host *host)
    425{
    426	if (host->quirks & SDHCI_QUIRK_NO_LED)
    427		return;
    428
    429	led_classdev_unregister(&host->led);
    430}
    431
    432static inline void sdhci_led_activate(struct sdhci_host *host)
    433{
    434}
    435
    436static inline void sdhci_led_deactivate(struct sdhci_host *host)
    437{
    438}
    439
    440#else
    441
    442static inline int sdhci_led_register(struct sdhci_host *host)
    443{
    444	return 0;
    445}
    446
    447static inline void sdhci_led_unregister(struct sdhci_host *host)
    448{
    449}
    450
    451static inline void sdhci_led_activate(struct sdhci_host *host)
    452{
    453	__sdhci_led_activate(host);
    454}
    455
    456static inline void sdhci_led_deactivate(struct sdhci_host *host)
    457{
    458	__sdhci_led_deactivate(host);
    459}
    460
    461#endif
    462
    463static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
    464			    unsigned long timeout)
    465{
    466	if (sdhci_data_line_cmd(mrq->cmd))
    467		mod_timer(&host->data_timer, timeout);
    468	else
    469		mod_timer(&host->timer, timeout);
    470}
    471
    472static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
    473{
    474	if (sdhci_data_line_cmd(mrq->cmd))
    475		del_timer(&host->data_timer);
    476	else
    477		del_timer(&host->timer);
    478}
    479
    480static inline bool sdhci_has_requests(struct sdhci_host *host)
    481{
    482	return host->cmd || host->data_cmd;
    483}
    484
    485/*****************************************************************************\
    486 *                                                                           *
    487 * Core functions                                                            *
    488 *                                                                           *
    489\*****************************************************************************/
    490
    491static void sdhci_read_block_pio(struct sdhci_host *host)
    492{
    493	unsigned long flags;
    494	size_t blksize, len, chunk;
    495	u32 scratch;
    496	u8 *buf;
    497
    498	DBG("PIO reading\n");
    499
    500	blksize = host->data->blksz;
    501	chunk = 0;
    502
    503	local_irq_save(flags);
    504
    505	while (blksize) {
    506		BUG_ON(!sg_miter_next(&host->sg_miter));
    507
    508		len = min(host->sg_miter.length, blksize);
    509
    510		blksize -= len;
    511		host->sg_miter.consumed = len;
    512
    513		buf = host->sg_miter.addr;
    514
    515		while (len) {
    516			if (chunk == 0) {
    517				scratch = sdhci_readl(host, SDHCI_BUFFER);
    518				chunk = 4;
    519			}
    520
    521			*buf = scratch & 0xFF;
    522
    523			buf++;
    524			scratch >>= 8;
    525			chunk--;
    526			len--;
    527		}
    528	}
    529
    530	sg_miter_stop(&host->sg_miter);
    531
    532	local_irq_restore(flags);
    533}
    534
    535static void sdhci_write_block_pio(struct sdhci_host *host)
    536{
    537	unsigned long flags;
    538	size_t blksize, len, chunk;
    539	u32 scratch;
    540	u8 *buf;
    541
    542	DBG("PIO writing\n");
    543
    544	blksize = host->data->blksz;
    545	chunk = 0;
    546	scratch = 0;
    547
    548	local_irq_save(flags);
    549
    550	while (blksize) {
    551		BUG_ON(!sg_miter_next(&host->sg_miter));
    552
    553		len = min(host->sg_miter.length, blksize);
    554
    555		blksize -= len;
    556		host->sg_miter.consumed = len;
    557
    558		buf = host->sg_miter.addr;
    559
    560		while (len) {
    561			scratch |= (u32)*buf << (chunk * 8);
    562
    563			buf++;
    564			chunk++;
    565			len--;
    566
    567			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
    568				sdhci_writel(host, scratch, SDHCI_BUFFER);
    569				chunk = 0;
    570				scratch = 0;
    571			}
    572		}
    573	}
    574
    575	sg_miter_stop(&host->sg_miter);
    576
    577	local_irq_restore(flags);
    578}
    579
    580static void sdhci_transfer_pio(struct sdhci_host *host)
    581{
    582	u32 mask;
    583
    584	if (host->blocks == 0)
    585		return;
    586
    587	if (host->data->flags & MMC_DATA_READ)
    588		mask = SDHCI_DATA_AVAILABLE;
    589	else
    590		mask = SDHCI_SPACE_AVAILABLE;
    591
    592	/*
    593	 * Some controllers (JMicron JMB38x) mess up the buffer bits
    594	 * for transfers < 4 bytes. As long as it is just one block,
    595	 * we can ignore the bits.
    596	 */
    597	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
    598		(host->data->blocks == 1))
    599		mask = ~0;
    600
    601	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
    602		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
    603			udelay(100);
    604
    605		if (host->data->flags & MMC_DATA_READ)
    606			sdhci_read_block_pio(host);
    607		else
    608			sdhci_write_block_pio(host);
    609
    610		host->blocks--;
    611		if (host->blocks == 0)
    612			break;
    613	}
    614
    615	DBG("PIO transfer complete.\n");
    616}
    617
    618static int sdhci_pre_dma_transfer(struct sdhci_host *host,
    619				  struct mmc_data *data, int cookie)
    620{
    621	int sg_count;
    622
    623	/*
    624	 * If the data buffers are already mapped, return the previous
    625	 * dma_map_sg() result.
    626	 */
    627	if (data->host_cookie == COOKIE_PRE_MAPPED)
    628		return data->sg_count;
    629
    630	/* Bounce write requests to the bounce buffer */
    631	if (host->bounce_buffer) {
    632		unsigned int length = data->blksz * data->blocks;
    633
    634		if (length > host->bounce_buffer_size) {
    635			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
    636			       mmc_hostname(host->mmc), length,
    637			       host->bounce_buffer_size);
    638			return -EIO;
    639		}
    640		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
    641			/* Copy the data to the bounce buffer */
    642			if (host->ops->copy_to_bounce_buffer) {
    643				host->ops->copy_to_bounce_buffer(host,
    644								 data, length);
    645			} else {
    646				sg_copy_to_buffer(data->sg, data->sg_len,
    647						  host->bounce_buffer, length);
    648			}
    649		}
    650		/* Switch ownership to the DMA */
    651		dma_sync_single_for_device(mmc_dev(host->mmc),
    652					   host->bounce_addr,
    653					   host->bounce_buffer_size,
    654					   mmc_get_dma_dir(data));
    655		/* Just a dummy value */
    656		sg_count = 1;
    657	} else {
    658		/* Just access the data directly from memory */
    659		sg_count = dma_map_sg(mmc_dev(host->mmc),
    660				      data->sg, data->sg_len,
    661				      mmc_get_dma_dir(data));
    662	}
    663
    664	if (sg_count == 0)
    665		return -ENOSPC;
    666
    667	data->sg_count = sg_count;
    668	data->host_cookie = cookie;
    669
    670	return sg_count;
    671}
    672
    673static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
    674{
    675	local_irq_save(*flags);
    676	return kmap_atomic(sg_page(sg)) + sg->offset;
    677}
    678
    679static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
    680{
    681	kunmap_atomic(buffer);
    682	local_irq_restore(*flags);
    683}
    684
    685void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
    686			   dma_addr_t addr, int len, unsigned int cmd)
    687{
    688	struct sdhci_adma2_64_desc *dma_desc = *desc;
    689
    690	/* 32-bit and 64-bit descriptors have these members in same position */
    691	dma_desc->cmd = cpu_to_le16(cmd);
    692	dma_desc->len = cpu_to_le16(len);
    693	dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));
    694
    695	if (host->flags & SDHCI_USE_64_BIT_DMA)
    696		dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));
    697
    698	*desc += host->desc_sz;
    699}
    700EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
    701
    702static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
    703					   void **desc, dma_addr_t addr,
    704					   int len, unsigned int cmd)
    705{
    706	if (host->ops->adma_write_desc)
    707		host->ops->adma_write_desc(host, desc, addr, len, cmd);
    708	else
    709		sdhci_adma_write_desc(host, desc, addr, len, cmd);
    710}
    711
    712static void sdhci_adma_mark_end(void *desc)
    713{
    714	struct sdhci_adma2_64_desc *dma_desc = desc;
    715
    716	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
    717	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
    718}
    719
    720static void sdhci_adma_table_pre(struct sdhci_host *host,
    721	struct mmc_data *data, int sg_count)
    722{
    723	struct scatterlist *sg;
    724	unsigned long flags;
    725	dma_addr_t addr, align_addr;
    726	void *desc, *align;
    727	char *buffer;
    728	int len, offset, i;
    729
    730	/*
     731	 * The spec does not specify the endianness of the descriptor table.
    732	 * We currently guess that it is LE.
    733	 */
    734
    735	host->sg_count = sg_count;
    736
    737	desc = host->adma_table;
    738	align = host->align_buffer;
    739
    740	align_addr = host->align_addr;
    741
    742	for_each_sg(data->sg, sg, host->sg_count, i) {
    743		addr = sg_dma_address(sg);
    744		len = sg_dma_len(sg);
    745
    746		/*
    747		 * The SDHCI specification states that ADMA addresses must
    748		 * be 32-bit aligned. If they aren't, then we use a bounce
    749		 * buffer for the (up to three) bytes that screw up the
    750		 * alignment.
    751		 */
    752		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
    753			 SDHCI_ADMA2_MASK;
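        		/*
        		 * For example, with SDHCI_ADMA2_ALIGN == 4 a segment
        		 * starting at address ...0x1001 gives offset = (4 - 1) & 3
        		 * = 3, so its first three bytes go through the align
        		 * buffer and the remainder starts 32-bit aligned.
        		 */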
    754		if (offset) {
    755			if (data->flags & MMC_DATA_WRITE) {
    756				buffer = sdhci_kmap_atomic(sg, &flags);
    757				memcpy(align, buffer, offset);
    758				sdhci_kunmap_atomic(buffer, &flags);
    759			}
    760
    761			/* tran, valid */
    762			__sdhci_adma_write_desc(host, &desc, align_addr,
    763						offset, ADMA2_TRAN_VALID);
    764
    765			BUG_ON(offset > 65536);
    766
    767			align += SDHCI_ADMA2_ALIGN;
    768			align_addr += SDHCI_ADMA2_ALIGN;
    769
    770			addr += offset;
    771			len -= offset;
    772		}
    773
    774		/*
    775		 * The block layer forces a minimum segment size of PAGE_SIZE,
    776		 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
    777		 * multiple descriptors, noting that the ADMA table is sized
    778		 * for 4KiB chunks anyway, so it will be big enough.
    779		 */
    780		while (len > host->max_adma) {
     781			int n = 32 * 1024; /* 32KiB */
    782
    783			__sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
    784			addr += n;
    785			len -= n;
    786		}
    787
    788		/* tran, valid */
    789		if (len)
    790			__sdhci_adma_write_desc(host, &desc, addr, len,
    791						ADMA2_TRAN_VALID);
    792
    793		/*
    794		 * If this triggers then we have a calculation bug
    795		 * somewhere. :/
    796		 */
    797		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
    798	}
    799
    800	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
    801		/* Mark the last descriptor as the terminating descriptor */
    802		if (desc != host->adma_table) {
    803			desc -= host->desc_sz;
    804			sdhci_adma_mark_end(desc);
    805		}
    806	} else {
    807		/* Add a terminating entry - nop, end, valid */
    808		__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
    809	}
    810}
    811
    812static void sdhci_adma_table_post(struct sdhci_host *host,
    813	struct mmc_data *data)
    814{
    815	struct scatterlist *sg;
    816	int i, size;
    817	void *align;
    818	char *buffer;
    819	unsigned long flags;
    820
    821	if (data->flags & MMC_DATA_READ) {
    822		bool has_unaligned = false;
    823
    824		/* Do a quick scan of the SG list for any unaligned mappings */
    825		for_each_sg(data->sg, sg, host->sg_count, i)
    826			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
    827				has_unaligned = true;
    828				break;
    829			}
    830
    831		if (has_unaligned) {
    832			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
    833					    data->sg_len, DMA_FROM_DEVICE);
    834
    835			align = host->align_buffer;
    836
    837			for_each_sg(data->sg, sg, host->sg_count, i) {
    838				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
    839					size = SDHCI_ADMA2_ALIGN -
    840					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
    841
    842					buffer = sdhci_kmap_atomic(sg, &flags);
    843					memcpy(buffer, align, size);
    844					sdhci_kunmap_atomic(buffer, &flags);
    845
    846					align += SDHCI_ADMA2_ALIGN;
    847				}
    848			}
    849		}
    850	}
    851}
    852
    853static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
    854{
    855	sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
    856	if (host->flags & SDHCI_USE_64_BIT_DMA)
    857		sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
    858}
    859
    860static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
    861{
    862	if (host->bounce_buffer)
    863		return host->bounce_addr;
    864	else
    865		return sg_dma_address(host->data->sg);
    866}
    867
    868static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
    869{
    870	if (host->v4_mode)
    871		sdhci_set_adma_addr(host, addr);
    872	else
    873		sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
    874}
    875
    876static unsigned int sdhci_target_timeout(struct sdhci_host *host,
    877					 struct mmc_command *cmd,
    878					 struct mmc_data *data)
    879{
    880	unsigned int target_timeout;
    881
    882	/* timeout in us */
    883	if (!data) {
    884		target_timeout = cmd->busy_timeout * 1000;
    885	} else {
    886		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
    887		if (host->clock && data->timeout_clks) {
    888			unsigned long long val;
    889
    890			/*
    891			 * data->timeout_clks is in units of clock cycles.
    892			 * host->clock is in Hz.  target_timeout is in us.
    893			 * Hence, us = 1000000 * cycles / Hz.  Round up.
    894			 */
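        			/*
        			 * For example, timeout_clks = 1000 at a 52 MHz clock
        			 * gives 1000000 * 1000 / 52000000 = 19 us plus a
        			 * remainder, so do_div() leaves 19 in val and the
        			 * remainder bumps target_timeout by one more.
        			 */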
    895			val = 1000000ULL * data->timeout_clks;
    896			if (do_div(val, host->clock))
    897				target_timeout++;
    898			target_timeout += val;
    899		}
    900	}
    901
    902	return target_timeout;
    903}
    904
    905static void sdhci_calc_sw_timeout(struct sdhci_host *host,
    906				  struct mmc_command *cmd)
    907{
    908	struct mmc_data *data = cmd->data;
    909	struct mmc_host *mmc = host->mmc;
    910	struct mmc_ios *ios = &mmc->ios;
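        	/*
        	 * ios->bus_width is log2-encoded (e.g. MMC_BUS_WIDTH_4 == 2),
        	 * so this yields a width of 1, 4 or 8 data lines.
        	 */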
    911	unsigned char bus_width = 1 << ios->bus_width;
    912	unsigned int blksz;
    913	unsigned int freq;
    914	u64 target_timeout;
    915	u64 transfer_time;
    916
    917	target_timeout = sdhci_target_timeout(host, cmd, data);
    918	target_timeout *= NSEC_PER_USEC;
    919
    920	if (data) {
    921		blksz = data->blksz;
    922		freq = mmc->actual_clock ? : host->clock;
    923		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
    924		do_div(transfer_time, freq);
    925		/* multiply by '2' to account for any unknowns */
    926		transfer_time = transfer_time * 2;
    927		/* calculate timeout for the entire data */
    928		host->data_timeout = data->blocks * target_timeout +
    929				     transfer_time;
    930	} else {
    931		host->data_timeout = target_timeout;
    932	}
    933
    934	if (host->data_timeout)
    935		host->data_timeout += MMC_CMD_TRANSFER_TIME;
    936}
    937
    938static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
    939			     bool *too_big)
    940{
    941	u8 count;
    942	struct mmc_data *data;
    943	unsigned target_timeout, current_timeout;
    944
    945	*too_big = false;
    946
    947	/*
    948	 * If the host controller provides us with an incorrect timeout
    949	 * value, just skip the check and use the maximum. The hardware may take
    950	 * longer to time out, but that's much better than having a too-short
    951	 * timeout value.
    952	 */
    953	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
    954		return host->max_timeout_count;
    955
    956	/* Unspecified command, assume max */
    957	if (cmd == NULL)
    958		return host->max_timeout_count;
    959
    960	data = cmd->data;
    961	/* Unspecified timeout, assume max */
    962	if (!data && !cmd->busy_timeout)
    963		return host->max_timeout_count;
    964
    965	/* timeout in us */
    966	target_timeout = sdhci_target_timeout(host, cmd, data);
    967
    968	/*
    969	 * Figure out needed cycles.
    970	 * We do this in steps in order to fit inside a 32 bit int.
    971	 * The first step is the minimum timeout, which will have a
    972	 * minimum resolution of 6 bits:
    973	 * (1) 2^13*1000 > 2^22,
    974	 * (2) host->timeout_clk < 2^16
    975	 *     =>
    976	 *     (1) / (2) > 2^6
    977	 */
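        	/*
        	 * The resulting count selects a timeout of 2^(13 + count) TMCLK
        	 * cycles. For example, with timeout_clk = 50000 (kHz, as set up
        	 * by sdhci_add_host()) and target_timeout = 100000 us, the loop
        	 * starts at 8192 * 1000 / 50000 = 163 us and doubles ten times
        	 * before exceeding the target, so count = 10, i.e. 2^23 cycles
        	 * (~168 ms at 50 MHz).
        	 */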
    978	count = 0;
    979	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
    980	while (current_timeout < target_timeout) {
    981		count++;
    982		current_timeout <<= 1;
    983		if (count > host->max_timeout_count) {
    984			if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
    985				DBG("Too large timeout 0x%x requested for CMD%d!\n",
    986				    count, cmd->opcode);
    987			count = host->max_timeout_count;
    988			*too_big = true;
    989			break;
    990		}
    991	}
    992
    993	return count;
    994}
    995
    996static void sdhci_set_transfer_irqs(struct sdhci_host *host)
    997{
    998	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
    999	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
   1000
   1001	if (host->flags & SDHCI_REQ_USE_DMA)
   1002		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
   1003	else
   1004		host->ier = (host->ier & ~dma_irqs) | pio_irqs;
   1005
   1006	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
   1007		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
   1008	else
   1009		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
   1010
   1011	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
   1012	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
   1013}
   1014
   1015void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
   1016{
   1017	if (enable)
   1018		host->ier |= SDHCI_INT_DATA_TIMEOUT;
   1019	else
   1020		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
   1021	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
   1022	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
   1023}
   1024EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
   1025
   1026void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
   1027{
   1028	bool too_big = false;
   1029	u8 count = sdhci_calc_timeout(host, cmd, &too_big);
   1030
   1031	if (too_big &&
   1032	    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
   1033		sdhci_calc_sw_timeout(host, cmd);
   1034		sdhci_set_data_timeout_irq(host, false);
   1035	} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
   1036		sdhci_set_data_timeout_irq(host, true);
   1037	}
   1038
   1039	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
   1040}
   1041EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
   1042
   1043static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
   1044{
   1045	if (host->ops->set_timeout)
   1046		host->ops->set_timeout(host, cmd);
   1047	else
   1048		__sdhci_set_timeout(host, cmd);
   1049}
   1050
   1051static void sdhci_initialize_data(struct sdhci_host *host,
   1052				  struct mmc_data *data)
   1053{
   1054	WARN_ON(host->data);
   1055
   1056	/* Sanity checks */
   1057	BUG_ON(data->blksz * data->blocks > 524288);
   1058	BUG_ON(data->blksz > host->mmc->max_blk_size);
   1059	BUG_ON(data->blocks > 65535);
   1060
   1061	host->data = data;
   1062	host->data_early = 0;
   1063	host->data->bytes_xfered = 0;
   1064}
   1065
   1066static inline void sdhci_set_block_info(struct sdhci_host *host,
   1067					struct mmc_data *data)
   1068{
   1069	/* Set the DMA boundary value and block size */
   1070	sdhci_writew(host,
   1071		     SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
   1072		     SDHCI_BLOCK_SIZE);
   1073	/*
    1074	 * From Version 4.10 onwards, if v4 mode is enabled, a 32-bit Block Count
    1075	 * may be supported; in that case the 16-bit block count register must be 0.
   1076	 */
   1077	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
   1078	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
   1079		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
   1080			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
   1081		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
   1082	} else {
   1083		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
   1084	}
   1085}
   1086
   1087static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
   1088{
   1089	struct mmc_data *data = cmd->data;
   1090
   1091	sdhci_initialize_data(host, data);
   1092
   1093	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
   1094		struct scatterlist *sg;
   1095		unsigned int length_mask, offset_mask;
   1096		int i;
   1097
   1098		host->flags |= SDHCI_REQ_USE_DMA;
   1099
   1100		/*
   1101		 * FIXME: This doesn't account for merging when mapping the
   1102		 * scatterlist.
   1103		 *
   1104		 * The assumption here being that alignment and lengths are
   1105		 * the same after DMA mapping to device address space.
   1106		 */
   1107		length_mask = 0;
   1108		offset_mask = 0;
   1109		if (host->flags & SDHCI_USE_ADMA) {
   1110			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
   1111				length_mask = 3;
   1112				/*
   1113				 * As we use up to 3 byte chunks to work
   1114				 * around alignment problems, we need to
   1115				 * check the offset as well.
   1116				 */
   1117				offset_mask = 3;
   1118			}
   1119		} else {
   1120			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
   1121				length_mask = 3;
   1122			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
   1123				offset_mask = 3;
   1124		}
   1125
   1126		if (unlikely(length_mask | offset_mask)) {
   1127			for_each_sg(data->sg, sg, data->sg_len, i) {
   1128				if (sg->length & length_mask) {
   1129					DBG("Reverting to PIO because of transfer size (%d)\n",
   1130					    sg->length);
   1131					host->flags &= ~SDHCI_REQ_USE_DMA;
   1132					break;
   1133				}
   1134				if (sg->offset & offset_mask) {
   1135					DBG("Reverting to PIO because of bad alignment\n");
   1136					host->flags &= ~SDHCI_REQ_USE_DMA;
   1137					break;
   1138				}
   1139			}
   1140		}
   1141	}
   1142
   1143	if (host->flags & SDHCI_REQ_USE_DMA) {
   1144		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
   1145
   1146		if (sg_cnt <= 0) {
   1147			/*
   1148			 * This only happens when someone fed
   1149			 * us an invalid request.
   1150			 */
   1151			WARN_ON(1);
   1152			host->flags &= ~SDHCI_REQ_USE_DMA;
   1153		} else if (host->flags & SDHCI_USE_ADMA) {
   1154			sdhci_adma_table_pre(host, data, sg_cnt);
   1155			sdhci_set_adma_addr(host, host->adma_addr);
   1156		} else {
   1157			WARN_ON(sg_cnt != 1);
   1158			sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
   1159		}
   1160	}
   1161
   1162	sdhci_config_dma(host);
   1163
   1164	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
   1165		int flags;
   1166
   1167		flags = SG_MITER_ATOMIC;
   1168		if (host->data->flags & MMC_DATA_READ)
   1169			flags |= SG_MITER_TO_SG;
   1170		else
   1171			flags |= SG_MITER_FROM_SG;
   1172		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
   1173		host->blocks = data->blocks;
   1174	}
   1175
   1176	sdhci_set_transfer_irqs(host);
   1177
   1178	sdhci_set_block_info(host, data);
   1179}
   1180
   1181#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
   1182
   1183static int sdhci_external_dma_init(struct sdhci_host *host)
   1184{
   1185	int ret = 0;
   1186	struct mmc_host *mmc = host->mmc;
   1187
   1188	host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx");
   1189	if (IS_ERR(host->tx_chan)) {
   1190		ret = PTR_ERR(host->tx_chan);
   1191		if (ret != -EPROBE_DEFER)
   1192			pr_warn("Failed to request TX DMA channel.\n");
   1193		host->tx_chan = NULL;
   1194		return ret;
   1195	}
   1196
   1197	host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx");
   1198	if (IS_ERR(host->rx_chan)) {
   1199		if (host->tx_chan) {
   1200			dma_release_channel(host->tx_chan);
   1201			host->tx_chan = NULL;
   1202		}
   1203
   1204		ret = PTR_ERR(host->rx_chan);
   1205		if (ret != -EPROBE_DEFER)
   1206			pr_warn("Failed to request RX DMA channel.\n");
   1207		host->rx_chan = NULL;
   1208	}
   1209
   1210	return ret;
   1211}
   1212
   1213static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
   1214						   struct mmc_data *data)
   1215{
   1216	return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
   1217}
   1218
   1219static int sdhci_external_dma_setup(struct sdhci_host *host,
   1220				    struct mmc_command *cmd)
   1221{
   1222	int ret, i;
   1223	enum dma_transfer_direction dir;
   1224	struct dma_async_tx_descriptor *desc;
   1225	struct mmc_data *data = cmd->data;
   1226	struct dma_chan *chan;
   1227	struct dma_slave_config cfg;
   1228	dma_cookie_t cookie;
   1229	int sg_cnt;
   1230
   1231	if (!host->mapbase)
   1232		return -EINVAL;
   1233
   1234	memset(&cfg, 0, sizeof(cfg));
   1235	cfg.src_addr = host->mapbase + SDHCI_BUFFER;
   1236	cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
   1237	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
   1238	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
   1239	cfg.src_maxburst = data->blksz / 4;
   1240	cfg.dst_maxburst = data->blksz / 4;
   1241
   1242	/* Sanity check: all the SG entries must be aligned by block size. */
   1243	for (i = 0; i < data->sg_len; i++) {
   1244		if ((data->sg + i)->length % data->blksz)
   1245			return -EINVAL;
   1246	}
   1247
   1248	chan = sdhci_external_dma_channel(host, data);
   1249
   1250	ret = dmaengine_slave_config(chan, &cfg);
   1251	if (ret)
   1252		return ret;
   1253
   1254	sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
   1255	if (sg_cnt <= 0)
   1256		return -EINVAL;
   1257
   1258	dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
   1259	desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
   1260				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
   1261	if (!desc)
   1262		return -EINVAL;
   1263
   1264	desc->callback = NULL;
   1265	desc->callback_param = NULL;
   1266
   1267	cookie = dmaengine_submit(desc);
   1268	if (dma_submit_error(cookie))
   1269		ret = cookie;
   1270
   1271	return ret;
   1272}
   1273
   1274static void sdhci_external_dma_release(struct sdhci_host *host)
   1275{
   1276	if (host->tx_chan) {
   1277		dma_release_channel(host->tx_chan);
   1278		host->tx_chan = NULL;
   1279	}
   1280
   1281	if (host->rx_chan) {
   1282		dma_release_channel(host->rx_chan);
   1283		host->rx_chan = NULL;
   1284	}
   1285
   1286	sdhci_switch_external_dma(host, false);
   1287}
   1288
   1289static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
   1290					      struct mmc_command *cmd)
   1291{
   1292	struct mmc_data *data = cmd->data;
   1293
   1294	sdhci_initialize_data(host, data);
   1295
   1296	host->flags |= SDHCI_REQ_USE_DMA;
   1297	sdhci_set_transfer_irqs(host);
   1298
   1299	sdhci_set_block_info(host, data);
   1300}
   1301
   1302static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
   1303					    struct mmc_command *cmd)
   1304{
   1305	if (!sdhci_external_dma_setup(host, cmd)) {
   1306		__sdhci_external_dma_prepare_data(host, cmd);
   1307	} else {
   1308		sdhci_external_dma_release(host);
   1309		pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
   1310		       mmc_hostname(host->mmc));
   1311		sdhci_prepare_data(host, cmd);
   1312	}
   1313}
   1314
   1315static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
   1316					    struct mmc_command *cmd)
   1317{
   1318	struct dma_chan *chan;
   1319
   1320	if (!cmd->data)
   1321		return;
   1322
   1323	chan = sdhci_external_dma_channel(host, cmd->data);
   1324	if (chan)
   1325		dma_async_issue_pending(chan);
   1326}
   1327
   1328#else
   1329
   1330static inline int sdhci_external_dma_init(struct sdhci_host *host)
   1331{
   1332	return -EOPNOTSUPP;
   1333}
   1334
   1335static inline void sdhci_external_dma_release(struct sdhci_host *host)
   1336{
   1337}
   1338
   1339static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
   1340						   struct mmc_command *cmd)
   1341{
   1342	/* This should never happen */
   1343	WARN_ON_ONCE(1);
   1344}
   1345
   1346static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
   1347						   struct mmc_command *cmd)
   1348{
   1349}
   1350
   1351static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
   1352							  struct mmc_data *data)
   1353{
   1354	return NULL;
   1355}
   1356
   1357#endif
   1358
   1359void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
   1360{
   1361	host->use_external_dma = en;
   1362}
   1363EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
   1364
   1365static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
   1366				    struct mmc_request *mrq)
   1367{
   1368	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
   1369	       !mrq->cap_cmd_during_tfr;
   1370}
   1371
   1372static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
   1373				    struct mmc_request *mrq)
   1374{
   1375	return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
   1376}
   1377
   1378static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
   1379				      struct mmc_request *mrq)
   1380{
   1381	return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
   1382}
   1383
   1384static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
   1385					 struct mmc_command *cmd,
   1386					 u16 *mode)
   1387{
   1388	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
   1389			 (cmd->opcode != SD_IO_RW_EXTENDED);
   1390	bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
   1391	u16 ctrl2;
   1392
   1393	/*
   1394	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
   1395	 * Select' is recommended rather than use of 'Auto CMD12
   1396	 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
    1397	 * here because some controllers (e.g. sdhci-of-dwcmshc) expect it.
   1398	 */
   1399	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
   1400	    (use_cmd12 || use_cmd23)) {
   1401		*mode |= SDHCI_TRNS_AUTO_SEL;
   1402
   1403		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
   1404		if (use_cmd23)
   1405			ctrl2 |= SDHCI_CMD23_ENABLE;
   1406		else
   1407			ctrl2 &= ~SDHCI_CMD23_ENABLE;
   1408		sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
   1409
   1410		return;
   1411	}
   1412
   1413	/*
   1414	 * If we are sending CMD23, CMD12 never gets sent
   1415	 * on successful completion (so no Auto-CMD12).
   1416	 */
   1417	if (use_cmd12)
   1418		*mode |= SDHCI_TRNS_AUTO_CMD12;
   1419	else if (use_cmd23)
   1420		*mode |= SDHCI_TRNS_AUTO_CMD23;
   1421}
   1422
   1423static void sdhci_set_transfer_mode(struct sdhci_host *host,
   1424	struct mmc_command *cmd)
   1425{
   1426	u16 mode = 0;
   1427	struct mmc_data *data = cmd->data;
   1428
   1429	if (data == NULL) {
   1430		if (host->quirks2 &
   1431			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
   1432			/* must not clear SDHCI_TRANSFER_MODE when tuning */
   1433			if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
   1434				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
   1435		} else {
    1436			/* clear Auto CMD settings for no data CMDs */
   1437			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
   1438			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
   1439				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
   1440		}
   1441		return;
   1442	}
   1443
   1444	WARN_ON(!host->data);
   1445
   1446	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
   1447		mode = SDHCI_TRNS_BLK_CNT_EN;
   1448
   1449	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
   1450		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
   1451		sdhci_auto_cmd_select(host, cmd, &mode);
   1452		if (sdhci_auto_cmd23(host, cmd->mrq))
   1453			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
   1454	}
   1455
   1456	if (data->flags & MMC_DATA_READ)
   1457		mode |= SDHCI_TRNS_READ;
   1458	if (host->flags & SDHCI_REQ_USE_DMA)
   1459		mode |= SDHCI_TRNS_DMA;
   1460
   1461	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
   1462}
   1463
   1464static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
   1465{
   1466	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
   1467		((mrq->cmd && mrq->cmd->error) ||
   1468		 (mrq->sbc && mrq->sbc->error) ||
   1469		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
   1470		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
   1471}
   1472
   1473static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
   1474{
   1475	int i;
   1476
   1477	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
   1478		if (host->mrqs_done[i] == mrq) {
   1479			WARN_ON(1);
   1480			return;
   1481		}
   1482	}
   1483
   1484	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
   1485		if (!host->mrqs_done[i]) {
   1486			host->mrqs_done[i] = mrq;
   1487			break;
   1488		}
   1489	}
   1490
   1491	WARN_ON(i >= SDHCI_MAX_MRQS);
   1492}
   1493
   1494static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
   1495{
   1496	if (host->cmd && host->cmd->mrq == mrq)
   1497		host->cmd = NULL;
   1498
   1499	if (host->data_cmd && host->data_cmd->mrq == mrq)
   1500		host->data_cmd = NULL;
   1501
   1502	if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
   1503		host->deferred_cmd = NULL;
   1504
   1505	if (host->data && host->data->mrq == mrq)
   1506		host->data = NULL;
   1507
   1508	if (sdhci_needs_reset(host, mrq))
   1509		host->pending_reset = true;
   1510
   1511	sdhci_set_mrq_done(host, mrq);
   1512
   1513	sdhci_del_timer(host, mrq);
   1514
   1515	if (!sdhci_has_requests(host))
   1516		sdhci_led_deactivate(host);
   1517}
   1518
   1519static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
   1520{
   1521	__sdhci_finish_mrq(host, mrq);
   1522
   1523	queue_work(host->complete_wq, &host->complete_work);
   1524}
   1525
   1526static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
   1527{
   1528	struct mmc_command *data_cmd = host->data_cmd;
   1529	struct mmc_data *data = host->data;
   1530
   1531	host->data = NULL;
   1532	host->data_cmd = NULL;
   1533
   1534	/*
   1535	 * The controller needs a reset of internal state machines upon error
   1536	 * conditions.
   1537	 */
   1538	if (data->error) {
   1539		if (!host->cmd || host->cmd == data_cmd)
   1540			sdhci_do_reset(host, SDHCI_RESET_CMD);
   1541		sdhci_do_reset(host, SDHCI_RESET_DATA);
   1542	}
   1543
   1544	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
   1545	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
   1546		sdhci_adma_table_post(host, data);
   1547
   1548	/*
   1549	 * The specification states that the block count register must
   1550	 * be updated, but it does not specify at what point in the
   1551	 * data flow. That makes the register entirely useless to read
   1552	 * back so we have to assume that nothing made it to the card
   1553	 * in the event of an error.
   1554	 */
   1555	if (data->error)
   1556		data->bytes_xfered = 0;
   1557	else
   1558		data->bytes_xfered = data->blksz * data->blocks;
   1559
   1560	/*
   1561	 * Need to send CMD12 if -
   1562	 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
   1563	 * b) error in multiblock transfer
   1564	 */
   1565	if (data->stop &&
   1566	    ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
   1567	     data->error)) {
   1568		/*
   1569		 * 'cap_cmd_during_tfr' request must not use the command line
   1570		 * after mmc_command_done() has been called. It is upper layer's
    1571		 * after mmc_command_done() has been called. It is the upper layer's
   1572		 */
   1573		if (data->mrq->cap_cmd_during_tfr) {
   1574			__sdhci_finish_mrq(host, data->mrq);
   1575		} else {
   1576			/* Avoid triggering warning in sdhci_send_command() */
   1577			host->cmd = NULL;
   1578			if (!sdhci_send_command(host, data->stop)) {
   1579				if (sw_data_timeout) {
   1580					/*
   1581					 * This is anyway a sw data timeout, so
   1582					 * give up now.
   1583					 */
   1584					data->stop->error = -EIO;
   1585					__sdhci_finish_mrq(host, data->mrq);
   1586				} else {
   1587					WARN_ON(host->deferred_cmd);
   1588					host->deferred_cmd = data->stop;
   1589				}
   1590			}
   1591		}
   1592	} else {
   1593		__sdhci_finish_mrq(host, data->mrq);
   1594	}
   1595}
   1596
   1597static void sdhci_finish_data(struct sdhci_host *host)
   1598{
   1599	__sdhci_finish_data(host, false);
   1600}
   1601
   1602static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
   1603{
   1604	int flags;
   1605	u32 mask;
   1606	unsigned long timeout;
   1607
   1608	WARN_ON(host->cmd);
   1609
   1610	/* Initially, a command has no error */
   1611	cmd->error = 0;
   1612
   1613	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
   1614	    cmd->opcode == MMC_STOP_TRANSMISSION)
   1615		cmd->flags |= MMC_RSP_BUSY;
   1616
   1617	mask = SDHCI_CMD_INHIBIT;
   1618	if (sdhci_data_line_cmd(cmd))
   1619		mask |= SDHCI_DATA_INHIBIT;
   1620
    1621	/* We shouldn't wait for data inhibit for stop commands, even
   1622	   though they might use busy signaling */
   1623	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
   1624		mask &= ~SDHCI_DATA_INHIBIT;
   1625
   1626	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
   1627		return false;
   1628
   1629	host->cmd = cmd;
   1630	host->data_timeout = 0;
   1631	if (sdhci_data_line_cmd(cmd)) {
   1632		WARN_ON(host->data_cmd);
   1633		host->data_cmd = cmd;
   1634		sdhci_set_timeout(host, cmd);
   1635	}
   1636
   1637	if (cmd->data) {
   1638		if (host->use_external_dma)
   1639			sdhci_external_dma_prepare_data(host, cmd);
   1640		else
   1641			sdhci_prepare_data(host, cmd);
   1642	}
   1643
   1644	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
   1645
   1646	sdhci_set_transfer_mode(host, cmd);
   1647
   1648	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
   1649		WARN_ONCE(1, "Unsupported response type!\n");
   1650		/*
   1651		 * This does not happen in practice because 136-bit response
   1652		 * commands never have busy waiting, so rather than complicate
   1653		 * the error path, just remove busy waiting and continue.
   1654		 */
   1655		cmd->flags &= ~MMC_RSP_BUSY;
   1656	}
   1657
   1658	if (!(cmd->flags & MMC_RSP_PRESENT))
   1659		flags = SDHCI_CMD_RESP_NONE;
   1660	else if (cmd->flags & MMC_RSP_136)
   1661		flags = SDHCI_CMD_RESP_LONG;
   1662	else if (cmd->flags & MMC_RSP_BUSY)
   1663		flags = SDHCI_CMD_RESP_SHORT_BUSY;
   1664	else
   1665		flags = SDHCI_CMD_RESP_SHORT;
   1666
   1667	if (cmd->flags & MMC_RSP_CRC)
   1668		flags |= SDHCI_CMD_CRC;
   1669	if (cmd->flags & MMC_RSP_OPCODE)
   1670		flags |= SDHCI_CMD_INDEX;
   1671
   1672	/* CMD19 is special in that the Data Present Select should be set */
   1673	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
   1674	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
   1675		flags |= SDHCI_CMD_DATA;
   1676
   1677	timeout = jiffies;
   1678	if (host->data_timeout)
   1679		timeout += nsecs_to_jiffies(host->data_timeout);
   1680	else if (!cmd->data && cmd->busy_timeout > 9000)
   1681		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
   1682	else
   1683		timeout += 10 * HZ;
   1684	sdhci_mod_timer(host, cmd->mrq, timeout);
   1685
   1686	if (host->use_external_dma)
   1687		sdhci_external_dma_pre_transfer(host, cmd);
   1688
   1689	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
   1690
   1691	return true;
   1692}
   1693
   1694static bool sdhci_present_error(struct sdhci_host *host,
   1695				struct mmc_command *cmd, bool present)
   1696{
   1697	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
   1698		cmd->error = -ENOMEDIUM;
   1699		return true;
   1700	}
   1701
   1702	return false;
   1703}
   1704
   1705static bool sdhci_send_command_retry(struct sdhci_host *host,
   1706				     struct mmc_command *cmd,
   1707				     unsigned long flags)
   1708	__releases(host->lock)
   1709	__acquires(host->lock)
   1710{
   1711	struct mmc_command *deferred_cmd = host->deferred_cmd;
   1712	int timeout = 10; /* Approx. 10 ms */
   1713	bool present;
   1714
   1715	while (!sdhci_send_command(host, cmd)) {
   1716		if (!timeout--) {
   1717			pr_err("%s: Controller never released inhibit bit(s).\n",
   1718			       mmc_hostname(host->mmc));
   1719			sdhci_dumpregs(host);
   1720			cmd->error = -EIO;
   1721			return false;
   1722		}
   1723
   1724		spin_unlock_irqrestore(&host->lock, flags);
   1725
   1726		usleep_range(1000, 1250);
   1727
   1728		present = host->mmc->ops->get_cd(host->mmc);
   1729
   1730		spin_lock_irqsave(&host->lock, flags);
   1731
   1732		/* A deferred command might disappear, handle that */
   1733		if (cmd == deferred_cmd && cmd != host->deferred_cmd)
   1734			return true;
   1735
   1736		if (sdhci_present_error(host, cmd, present))
   1737			return false;
   1738	}
   1739
   1740	if (cmd == host->deferred_cmd)
   1741		host->deferred_cmd = NULL;
   1742
   1743	return true;
   1744}
   1745
   1746static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
   1747{
   1748	int i, reg;
   1749
   1750	for (i = 0; i < 4; i++) {
   1751		reg = SDHCI_RESPONSE + (3 - i) * 4;
   1752		cmd->resp[i] = sdhci_readl(host, reg);
   1753	}
   1754
   1755	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
   1756		return;
   1757
   1758	/* CRC is stripped so we need to do some shifting */
   1759	for (i = 0; i < 4; i++) {
   1760		cmd->resp[i] <<= 8;
   1761		if (i != 3)
   1762			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
   1763	}
   1764}
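/*
 * Worked example for the shifting above (illustrative only): per the SDHCI
 * spec the RESPONSE registers hold R2 bits [127:8] because the controller
 * strips the CRC byte. After the reversed reads, resp[0] holds bits
 * [127:104] in its low 24 bits, resp[1] bits [103:72], and so on. Shifting
 * each word left by 8 and pulling in the top byte of the next word yields
 * resp[0] = bits [127:96], resp[1] = [95:64], resp[2] = [63:32] and
 * resp[3] = bits [39:8] << 8, i.e. the lowest byte of resp[3] (where the
 * CRC would sit) ends up zero.
 */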
   1765
   1766static void sdhci_finish_command(struct sdhci_host *host)
   1767{
   1768	struct mmc_command *cmd = host->cmd;
   1769
   1770	host->cmd = NULL;
   1771
   1772	if (cmd->flags & MMC_RSP_PRESENT) {
   1773		if (cmd->flags & MMC_RSP_136) {
   1774			sdhci_read_rsp_136(host, cmd);
   1775		} else {
   1776			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
   1777		}
   1778	}
   1779
   1780	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
   1781		mmc_command_done(host->mmc, cmd->mrq);
   1782
   1783	/*
    1784	 * The host can send an interrupt when the busy state has
   1785	 * ended, allowing us to wait without wasting CPU cycles.
   1786	 * The busy signal uses DAT0 so this is similar to waiting
   1787	 * for data to complete.
   1788	 *
   1789	 * Note: The 1.0 specification is a bit ambiguous about this
   1790	 *       feature so there might be some problems with older
   1791	 *       controllers.
   1792	 */
   1793	if (cmd->flags & MMC_RSP_BUSY) {
   1794		if (cmd->data) {
   1795			DBG("Cannot wait for busy signal when also doing a data transfer");
   1796		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
   1797			   cmd == host->data_cmd) {
   1798			/* Command complete before busy is ended */
   1799			return;
   1800		}
   1801	}
   1802
   1803	/* Finished CMD23, now send actual command. */
   1804	if (cmd == cmd->mrq->sbc) {
   1805		if (!sdhci_send_command(host, cmd->mrq->cmd)) {
   1806			WARN_ON(host->deferred_cmd);
   1807			host->deferred_cmd = cmd->mrq->cmd;
   1808		}
   1809	} else {
   1810
   1811		/* Processed actual command. */
   1812		if (host->data && host->data_early)
   1813			sdhci_finish_data(host);
   1814
   1815		if (!cmd->data)
   1816			__sdhci_finish_mrq(host, cmd->mrq);
   1817	}
   1818}
   1819
   1820static u16 sdhci_get_preset_value(struct sdhci_host *host)
   1821{
   1822	u16 preset = 0;
   1823
   1824	switch (host->timing) {
   1825	case MMC_TIMING_MMC_HS:
   1826	case MMC_TIMING_SD_HS:
   1827		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
   1828		break;
   1829	case MMC_TIMING_UHS_SDR12:
   1830		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
   1831		break;
   1832	case MMC_TIMING_UHS_SDR25:
   1833		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
   1834		break;
   1835	case MMC_TIMING_UHS_SDR50:
   1836		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
   1837		break;
   1838	case MMC_TIMING_UHS_SDR104:
   1839	case MMC_TIMING_MMC_HS200:
   1840		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
   1841		break;
   1842	case MMC_TIMING_UHS_DDR50:
   1843	case MMC_TIMING_MMC_DDR52:
   1844		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
   1845		break;
   1846	case MMC_TIMING_MMC_HS400:
   1847		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
   1848		break;
   1849	default:
   1850		pr_warn("%s: Invalid UHS-I mode selected\n",
   1851			mmc_hostname(host->mmc));
   1852		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
   1853		break;
   1854	}
   1855	return preset;
   1856}
   1857
   1858u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
   1859		   unsigned int *actual_clock)
   1860{
   1861	int div = 0; /* Initialized for compiler warning */
   1862	int real_div = div, clk_mul = 1;
   1863	u16 clk = 0;
   1864	bool switch_base_clk = false;
   1865
   1866	if (host->version >= SDHCI_SPEC_300) {
   1867		if (host->preset_enabled) {
   1868			u16 pre_val;
   1869
   1870			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
   1871			pre_val = sdhci_get_preset_value(host);
   1872			div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
   1873			if (host->clk_mul &&
   1874				(pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
   1875				clk = SDHCI_PROG_CLOCK_MODE;
   1876				real_div = div + 1;
   1877				clk_mul = host->clk_mul;
   1878			} else {
   1879				real_div = max_t(int, 1, div << 1);
   1880			}
   1881			goto clock_set;
   1882		}
   1883
   1884		/*
   1885		 * Check if the Host Controller supports Programmable Clock
   1886		 * Mode.
   1887		 */
   1888		if (host->clk_mul) {
   1889			for (div = 1; div <= 1024; div++) {
   1890				if ((host->max_clk * host->clk_mul / div)
   1891					<= clock)
   1892					break;
   1893			}
   1894			if ((host->max_clk * host->clk_mul / div) <= clock) {
   1895				/*
   1896				 * Set Programmable Clock Mode in the Clock
   1897				 * Control register.
   1898				 */
   1899				clk = SDHCI_PROG_CLOCK_MODE;
   1900				real_div = div;
   1901				clk_mul = host->clk_mul;
   1902				div--;
   1903			} else {
   1904				/*
   1905				 * Divisor can be too small to reach clock
   1906				 * speed requirement. Then use the base clock.
   1907				 */
   1908				switch_base_clk = true;
   1909			}
   1910		}
   1911
   1912		if (!host->clk_mul || switch_base_clk) {
   1913			/* Version 3.00 divisors must be a multiple of 2. */
   1914			if (host->max_clk <= clock)
   1915				div = 1;
   1916			else {
   1917				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
   1918				     div += 2) {
   1919					if ((host->max_clk / div) <= clock)
   1920						break;
   1921				}
   1922			}
   1923			real_div = div;
   1924			div >>= 1;
   1925			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
   1926				&& !div && host->max_clk <= 25000000)
   1927				div = 1;
   1928		}
   1929	} else {
   1930		/* Version 2.00 divisors must be a power of 2. */
   1931		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
   1932			if ((host->max_clk / div) <= clock)
   1933				break;
   1934		}
   1935		real_div = div;
   1936		div >>= 1;
   1937	}
   1938
   1939clock_set:
   1940	if (real_div)
   1941		*actual_clock = (host->max_clk * clk_mul) / real_div;
   1942	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
   1943	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
   1944		<< SDHCI_DIVIDER_HI_SHIFT;
   1945
   1946	return clk;
   1947}
   1948EXPORT_SYMBOL_GPL(sdhci_calc_clk);
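/*
 * Worked examples for the divider selection above (illustrative only,
 * assuming host->max_clk = 200 MHz and no clock multiplier):
 *
 *   - SDHCI v3.00, requested 50 MHz: the even-divisor scan stops at
 *     div = 4 (200 / 4 = 50 MHz), so real_div = 4, *actual_clock = 50 MHz
 *     and div >>= 1 writes 2 into the 10-bit divider field.
 *
 *   - SDHCI v2.00, requested 25 MHz: the power-of-two scan stops at
 *     div = 8 (200 / 8 = 25 MHz), so *actual_clock = 25 MHz and the
 *     register value becomes 4, meaning "base clock divided by 2 * 4".
 */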
   1949
   1950void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
   1951{
   1952	ktime_t timeout;
   1953
   1954	clk |= SDHCI_CLOCK_INT_EN;
   1955	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
   1956
   1957	/* Wait max 150 ms */
   1958	timeout = ktime_add_ms(ktime_get(), 150);
   1959	while (1) {
   1960		bool timedout = ktime_after(ktime_get(), timeout);
   1961
   1962		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
   1963		if (clk & SDHCI_CLOCK_INT_STABLE)
   1964			break;
   1965		if (timedout) {
   1966			pr_err("%s: Internal clock never stabilised.\n",
   1967			       mmc_hostname(host->mmc));
   1968			sdhci_dumpregs(host);
   1969			return;
   1970		}
   1971		udelay(10);
   1972	}
   1973
   1974	if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
   1975		clk |= SDHCI_CLOCK_PLL_EN;
   1976		clk &= ~SDHCI_CLOCK_INT_STABLE;
   1977		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
   1978
   1979		/* Wait max 150 ms */
   1980		timeout = ktime_add_ms(ktime_get(), 150);
   1981		while (1) {
   1982			bool timedout = ktime_after(ktime_get(), timeout);
   1983
   1984			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
   1985			if (clk & SDHCI_CLOCK_INT_STABLE)
   1986				break;
   1987			if (timedout) {
   1988				pr_err("%s: PLL clock never stabilised.\n",
   1989				       mmc_hostname(host->mmc));
   1990				sdhci_dumpregs(host);
   1991				return;
   1992			}
   1993			udelay(10);
   1994		}
   1995	}
   1996
   1997	clk |= SDHCI_CLOCK_CARD_EN;
   1998	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
   1999}
   2000EXPORT_SYMBOL_GPL(sdhci_enable_clk);
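/*
 * Illustrative sketch (hypothetical, not compiled): both stabilisation waits
 * above use the same bounded-poll pattern - sample the deadline *before*
 * reading the status, so that a read which succeeds just after the deadline
 * still counts as success. example_poll_stable() below is only a sketch of
 * that pattern, not a helper this driver provides.
 */
#if 0
static int example_poll_stable(struct sdhci_host *host, u16 bit,
			       unsigned int timeout_ms)
{
	ktime_t deadline = ktime_add_ms(ktime_get(), timeout_ms);

	while (1) {
		bool timedout = ktime_after(ktime_get(), deadline);

		if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) & bit)
			return 0;
		if (timedout)
			return -ETIMEDOUT;
		udelay(10);
	}
}
#endif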
   2001
   2002void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
   2003{
   2004	u16 clk;
   2005
   2006	host->mmc->actual_clock = 0;
   2007
   2008	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
   2009
   2010	if (clock == 0)
   2011		return;
   2012
   2013	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
   2014	sdhci_enable_clk(host, clk);
   2015}
   2016EXPORT_SYMBOL_GPL(sdhci_set_clock);
   2017
   2018static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
   2019				unsigned short vdd)
   2020{
   2021	struct mmc_host *mmc = host->mmc;
   2022
   2023	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
   2024
   2025	if (mode != MMC_POWER_OFF)
   2026		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
   2027	else
   2028		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
   2029}
   2030
   2031void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
   2032			   unsigned short vdd)
   2033{
   2034	u8 pwr = 0;
   2035
   2036	if (mode != MMC_POWER_OFF) {
   2037		switch (1 << vdd) {
   2038		case MMC_VDD_165_195:
   2039		/*
   2040		 * Without a regulator, SDHCI does not support 2.0v
   2041		 * so we only get here if the driver deliberately
   2042		 * added the 2.0v range to ocr_avail. Map it to 1.8v
   2043		 * for the purpose of turning on the power.
   2044		 */
   2045		case MMC_VDD_20_21:
   2046			pwr = SDHCI_POWER_180;
   2047			break;
   2048		case MMC_VDD_29_30:
   2049		case MMC_VDD_30_31:
   2050			pwr = SDHCI_POWER_300;
   2051			break;
   2052		case MMC_VDD_32_33:
   2053		case MMC_VDD_33_34:
   2054		/*
   2055		 * 3.4 ~ 3.6V are valid only for those platforms where it's
   2056		 * known that the voltage range is supported by hardware.
   2057		 */
   2058		case MMC_VDD_34_35:
   2059		case MMC_VDD_35_36:
   2060			pwr = SDHCI_POWER_330;
   2061			break;
   2062		default:
   2063			WARN(1, "%s: Invalid vdd %#x\n",
   2064			     mmc_hostname(host->mmc), vdd);
   2065			break;
   2066		}
   2067	}
   2068
   2069	if (host->pwr == pwr)
   2070		return;
   2071
   2072	host->pwr = pwr;
   2073
   2074	if (pwr == 0) {
   2075		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
   2076		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
   2077			sdhci_runtime_pm_bus_off(host);
   2078	} else {
   2079		/*
   2080		 * Spec says that we should clear the power reg before setting
   2081		 * a new value. Some controllers don't seem to like this though.
   2082		 */
   2083		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
   2084			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
   2085
   2086		/*
   2087		 * At least the Marvell CaFe chip gets confused if we set the
    2088		 * voltage and turn on the power at the same time, so set the
   2089		 * voltage first.
   2090		 */
   2091		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
   2092			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
   2093
   2094		pwr |= SDHCI_POWER_ON;
   2095
   2096		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
   2097
   2098		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
   2099			sdhci_runtime_pm_bus_on(host);
   2100
   2101		/*
    2102		 * Some controllers need an extra 10ms delay after applying
    2103		 * power before they can apply the clock
   2104		 */
   2105		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
   2106			mdelay(10);
   2107	}
   2108}
   2109EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
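/*
 * Note on the switch above (illustrative only): vdd is the OCR bit number,
 * so "1 << vdd" rebuilds the MMC_VDD_* mask. For example, a vdd of 21
 * selects MMC_VDD_33_34 (the 3.3-3.4 V OCR bit) and is mapped to
 * SDHCI_POWER_330 before SDHCI_POWER_ON is added.
 */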
   2110
   2111void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
   2112		     unsigned short vdd)
   2113{
   2114	if (IS_ERR(host->mmc->supply.vmmc))
   2115		sdhci_set_power_noreg(host, mode, vdd);
   2116	else
   2117		sdhci_set_power_reg(host, mode, vdd);
   2118}
   2119EXPORT_SYMBOL_GPL(sdhci_set_power);
   2120
   2121/*
   2122 * Some controllers need to configure a valid bus voltage on their power
   2123 * register regardless of whether an external regulator is taking care of power
   2124 * supply. This helper function takes care of it if set as the controller's
   2125 * sdhci_ops.set_power callback.
   2126 */
   2127void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
   2128				     unsigned char mode,
   2129				     unsigned short vdd)
   2130{
   2131	if (!IS_ERR(host->mmc->supply.vmmc)) {
   2132		struct mmc_host *mmc = host->mmc;
   2133
   2134		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
   2135	}
   2136	sdhci_set_power_noreg(host, mode, vdd);
   2137}
   2138EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
   2139
   2140/*****************************************************************************\
   2141 *                                                                           *
   2142 * MMC callbacks                                                             *
   2143 *                                                                           *
   2144\*****************************************************************************/
   2145
   2146void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
   2147{
   2148	struct sdhci_host *host = mmc_priv(mmc);
   2149	struct mmc_command *cmd;
   2150	unsigned long flags;
   2151	bool present;
   2152
   2153	/* Firstly check card presence */
   2154	present = mmc->ops->get_cd(mmc);
   2155
   2156	spin_lock_irqsave(&host->lock, flags);
   2157
   2158	sdhci_led_activate(host);
   2159
   2160	if (sdhci_present_error(host, mrq->cmd, present))
   2161		goto out_finish;
   2162
   2163	cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
   2164
   2165	if (!sdhci_send_command_retry(host, cmd, flags))
   2166		goto out_finish;
   2167
   2168	spin_unlock_irqrestore(&host->lock, flags);
   2169
   2170	return;
   2171
   2172out_finish:
   2173	sdhci_finish_mrq(host, mrq);
   2174	spin_unlock_irqrestore(&host->lock, flags);
   2175}
   2176EXPORT_SYMBOL_GPL(sdhci_request);
   2177
   2178int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
   2179{
   2180	struct sdhci_host *host = mmc_priv(mmc);
   2181	struct mmc_command *cmd;
   2182	unsigned long flags;
   2183	int ret = 0;
   2184
   2185	spin_lock_irqsave(&host->lock, flags);
   2186
   2187	if (sdhci_present_error(host, mrq->cmd, true)) {
   2188		sdhci_finish_mrq(host, mrq);
   2189		goto out_finish;
   2190	}
   2191
   2192	cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
   2193
   2194	/*
    2195	 * The HSQ may send a command in interrupt context without polling
    2196	 * the busy signaling, so return -EBUSY if the controller has not
    2197	 * released the inhibit bits. That lets the HSQ retry the request
    2198	 * in non-atomic context, which is why we must not finish the
    2199	 * request here.
   2200	 */
   2201	if (!sdhci_send_command(host, cmd))
   2202		ret = -EBUSY;
   2203	else
   2204		sdhci_led_activate(host);
   2205
   2206out_finish:
   2207	spin_unlock_irqrestore(&host->lock, flags);
   2208	return ret;
   2209}
   2210EXPORT_SYMBOL_GPL(sdhci_request_atomic);
   2211
   2212void sdhci_set_bus_width(struct sdhci_host *host, int width)
   2213{
   2214	u8 ctrl;
   2215
   2216	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
   2217	if (width == MMC_BUS_WIDTH_8) {
   2218		ctrl &= ~SDHCI_CTRL_4BITBUS;
   2219		ctrl |= SDHCI_CTRL_8BITBUS;
   2220	} else {
   2221		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
   2222			ctrl &= ~SDHCI_CTRL_8BITBUS;
   2223		if (width == MMC_BUS_WIDTH_4)
   2224			ctrl |= SDHCI_CTRL_4BITBUS;
   2225		else
   2226			ctrl &= ~SDHCI_CTRL_4BITBUS;
   2227	}
   2228	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
   2229}
   2230EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
   2231
   2232void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
   2233{
   2234	u16 ctrl_2;
   2235
   2236	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
   2237	/* Select Bus Speed Mode for host */
   2238	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
   2239	if ((timing == MMC_TIMING_MMC_HS200) ||
   2240	    (timing == MMC_TIMING_UHS_SDR104))
   2241		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
   2242	else if (timing == MMC_TIMING_UHS_SDR12)
   2243		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
   2244	else if (timing == MMC_TIMING_UHS_SDR25)
   2245		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
   2246	else if (timing == MMC_TIMING_UHS_SDR50)
   2247		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
   2248	else if ((timing == MMC_TIMING_UHS_DDR50) ||
   2249		 (timing == MMC_TIMING_MMC_DDR52))
   2250		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
   2251	else if (timing == MMC_TIMING_MMC_HS400)
   2252		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
   2253	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
   2254}
   2255EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
   2256
   2257void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
   2258{
   2259	struct sdhci_host *host = mmc_priv(mmc);
   2260	u8 ctrl;
   2261
   2262	if (ios->power_mode == MMC_POWER_UNDEFINED)
   2263		return;
   2264
   2265	if (host->flags & SDHCI_DEVICE_DEAD) {
   2266		if (!IS_ERR(mmc->supply.vmmc) &&
   2267		    ios->power_mode == MMC_POWER_OFF)
   2268			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
   2269		return;
   2270	}
   2271
   2272	/*
   2273	 * Reset the chip on each power off.
   2274	 * Should clear out any weird states.
   2275	 */
   2276	if (ios->power_mode == MMC_POWER_OFF) {
   2277		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
   2278		sdhci_reinit(host);
   2279	}
   2280
   2281	if (host->version >= SDHCI_SPEC_300 &&
   2282		(ios->power_mode == MMC_POWER_UP) &&
   2283		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
   2284		sdhci_enable_preset_value(host, false);
   2285
   2286	if (!ios->clock || ios->clock != host->clock) {
   2287		host->ops->set_clock(host, ios->clock);
   2288		host->clock = ios->clock;
   2289
   2290		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
   2291		    host->clock) {
   2292			host->timeout_clk = mmc->actual_clock ?
   2293						mmc->actual_clock / 1000 :
   2294						host->clock / 1000;
   2295			mmc->max_busy_timeout =
   2296				host->ops->get_max_timeout_count ?
   2297				host->ops->get_max_timeout_count(host) :
   2298				1 << 27;
   2299			mmc->max_busy_timeout /= host->timeout_clk;
   2300		}
   2301	}
   2302
   2303	if (host->ops->set_power)
   2304		host->ops->set_power(host, ios->power_mode, ios->vdd);
   2305	else
   2306		sdhci_set_power(host, ios->power_mode, ios->vdd);
   2307
   2308	if (host->ops->platform_send_init_74_clocks)
   2309		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
   2310
   2311	host->ops->set_bus_width(host, ios->bus_width);
   2312
   2313	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
   2314
   2315	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
   2316		if (ios->timing == MMC_TIMING_SD_HS ||
   2317		     ios->timing == MMC_TIMING_MMC_HS ||
   2318		     ios->timing == MMC_TIMING_MMC_HS400 ||
   2319		     ios->timing == MMC_TIMING_MMC_HS200 ||
   2320		     ios->timing == MMC_TIMING_MMC_DDR52 ||
   2321		     ios->timing == MMC_TIMING_UHS_SDR50 ||
   2322		     ios->timing == MMC_TIMING_UHS_SDR104 ||
   2323		     ios->timing == MMC_TIMING_UHS_DDR50 ||
   2324		     ios->timing == MMC_TIMING_UHS_SDR25)
   2325			ctrl |= SDHCI_CTRL_HISPD;
   2326		else
   2327			ctrl &= ~SDHCI_CTRL_HISPD;
   2328	}
   2329
   2330	if (host->version >= SDHCI_SPEC_300) {
   2331		u16 clk, ctrl_2;
   2332
   2333		if (!host->preset_enabled) {
   2334			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
   2335			/*
   2336			 * We only need to set Driver Strength if the
   2337			 * preset value enable is not set.
   2338			 */
   2339			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
   2340			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
   2341			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
   2342				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
   2343			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
   2344				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
   2345			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
   2346				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
   2347			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
   2348				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
   2349			else {
   2350				pr_warn("%s: invalid driver type, default to driver type B\n",
   2351					mmc_hostname(mmc));
   2352				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
   2353			}
   2354
   2355			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
   2356		} else {
   2357			/*
   2358			 * According to SDHC Spec v3.00, if the Preset Value
   2359			 * Enable in the Host Control 2 register is set, we
   2360			 * need to reset SD Clock Enable before changing High
    2361			 * Speed Enable to avoid generating clock glitches.
   2362			 */
   2363
   2364			/* Reset SD Clock Enable */
   2365			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
   2366			clk &= ~SDHCI_CLOCK_CARD_EN;
   2367			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
   2368
   2369			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
   2370
   2371			/* Re-enable SD Clock */
   2372			host->ops->set_clock(host, host->clock);
   2373		}
   2374
   2375		/* Reset SD Clock Enable */
   2376		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
   2377		clk &= ~SDHCI_CLOCK_CARD_EN;
   2378		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
   2379
   2380		host->ops->set_uhs_signaling(host, ios->timing);
   2381		host->timing = ios->timing;
   2382
   2383		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
   2384				((ios->timing == MMC_TIMING_UHS_SDR12) ||
   2385				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
   2386				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
   2387				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
   2388				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
   2389				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
   2390			u16 preset;
   2391
   2392			sdhci_enable_preset_value(host, true);
   2393			preset = sdhci_get_preset_value(host);
   2394			ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
   2395						  preset);
   2396		}
   2397
   2398		/* Re-enable SD Clock */
   2399		host->ops->set_clock(host, host->clock);
   2400	} else
   2401		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
   2402
   2403	/*
   2404	 * Some (ENE) controllers go apeshit on some ios operation,
   2405	 * signalling timeout and CRC errors even on CMD0. Resetting
   2406	 * it on each ios seems to solve the problem.
   2407	 */
   2408	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
   2409		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
   2410}
   2411EXPORT_SYMBOL_GPL(sdhci_set_ios);
   2412
   2413static int sdhci_get_cd(struct mmc_host *mmc)
   2414{
   2415	struct sdhci_host *host = mmc_priv(mmc);
   2416	int gpio_cd = mmc_gpio_get_cd(mmc);
   2417
   2418	if (host->flags & SDHCI_DEVICE_DEAD)
   2419		return 0;
   2420
   2421	/* If nonremovable, assume that the card is always present. */
   2422	if (!mmc_card_is_removable(mmc))
   2423		return 1;
   2424
   2425	/*
    2426	 * Try slot GPIO detect; if defined, it takes precedence
    2427	 * over the built-in controller functionality.
   2428	 */
   2429	if (gpio_cd >= 0)
   2430		return !!gpio_cd;
   2431
   2432	/* If polling, assume that the card is always present. */
   2433	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
   2434		return 1;
   2435
   2436	/* Host native card detect */
   2437	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
   2438}
   2439
   2440int sdhci_get_cd_nogpio(struct mmc_host *mmc)
   2441{
   2442	struct sdhci_host *host = mmc_priv(mmc);
   2443	unsigned long flags;
   2444	int ret = 0;
   2445
   2446	spin_lock_irqsave(&host->lock, flags);
   2447
   2448	if (host->flags & SDHCI_DEVICE_DEAD)
   2449		goto out;
   2450
   2451	ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
   2452out:
   2453	spin_unlock_irqrestore(&host->lock, flags);
   2454
   2455	return ret;
   2456}
   2457EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
   2458
   2459static int sdhci_check_ro(struct sdhci_host *host)
   2460{
   2461	unsigned long flags;
   2462	int is_readonly;
   2463
   2464	spin_lock_irqsave(&host->lock, flags);
   2465
   2466	if (host->flags & SDHCI_DEVICE_DEAD)
   2467		is_readonly = 0;
   2468	else if (host->ops->get_ro)
   2469		is_readonly = host->ops->get_ro(host);
   2470	else if (mmc_can_gpio_ro(host->mmc))
   2471		is_readonly = mmc_gpio_get_ro(host->mmc);
   2472	else
   2473		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
   2474				& SDHCI_WRITE_PROTECT);
   2475
   2476	spin_unlock_irqrestore(&host->lock, flags);
   2477
    2478	/* This quirk needs to be replaced by a callback function later */
   2479	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
   2480		!is_readonly : is_readonly;
   2481}
   2482
   2483#define SAMPLE_COUNT	5
   2484
   2485static int sdhci_get_ro(struct mmc_host *mmc)
   2486{
   2487	struct sdhci_host *host = mmc_priv(mmc);
   2488	int i, ro_count;
   2489
   2490	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
   2491		return sdhci_check_ro(host);
   2492
   2493	ro_count = 0;
   2494	for (i = 0; i < SAMPLE_COUNT; i++) {
   2495		if (sdhci_check_ro(host)) {
   2496			if (++ro_count > SAMPLE_COUNT / 2)
   2497				return 1;
   2498		}
   2499		msleep(30);
   2500	}
   2501	return 0;
   2502}
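/*
 * Worked example for the sampling above (illustrative only): with
 * SAMPLE_COUNT = 5 the card is reported read-only as soon as more than
 * SAMPLE_COUNT / 2 samples (i.e. 3 of them) read as read-only, so a flaky
 * write-protect switch needs a majority of the ~30 ms spaced samples to
 * win, bounding the whole check at roughly 150 ms.
 */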
   2503
   2504static void sdhci_hw_reset(struct mmc_host *mmc)
   2505{
   2506	struct sdhci_host *host = mmc_priv(mmc);
   2507
   2508	if (host->ops && host->ops->hw_reset)
   2509		host->ops->hw_reset(host);
   2510}
   2511
   2512static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
   2513{
   2514	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
   2515		if (enable)
   2516			host->ier |= SDHCI_INT_CARD_INT;
   2517		else
   2518			host->ier &= ~SDHCI_INT_CARD_INT;
   2519
   2520		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
   2521		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
   2522	}
   2523}
   2524
   2525void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
   2526{
   2527	struct sdhci_host *host = mmc_priv(mmc);
   2528	unsigned long flags;
   2529
   2530	if (enable)
   2531		pm_runtime_get_noresume(mmc_dev(mmc));
   2532
   2533	spin_lock_irqsave(&host->lock, flags);
   2534	sdhci_enable_sdio_irq_nolock(host, enable);
   2535	spin_unlock_irqrestore(&host->lock, flags);
   2536
   2537	if (!enable)
   2538		pm_runtime_put_noidle(mmc_dev(mmc));
   2539}
   2540EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
   2541
   2542static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
   2543{
   2544	struct sdhci_host *host = mmc_priv(mmc);
   2545	unsigned long flags;
   2546
   2547	spin_lock_irqsave(&host->lock, flags);
   2548	sdhci_enable_sdio_irq_nolock(host, true);
   2549	spin_unlock_irqrestore(&host->lock, flags);
   2550}
   2551
   2552int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
   2553				      struct mmc_ios *ios)
   2554{
   2555	struct sdhci_host *host = mmc_priv(mmc);
   2556	u16 ctrl;
   2557	int ret;
   2558
   2559	/*
   2560	 * Signal Voltage Switching is only applicable for Host Controllers
   2561	 * v3.00 and above.
   2562	 */
   2563	if (host->version < SDHCI_SPEC_300)
   2564		return 0;
   2565
   2566	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
   2567
   2568	switch (ios->signal_voltage) {
   2569	case MMC_SIGNAL_VOLTAGE_330:
   2570		if (!(host->flags & SDHCI_SIGNALING_330))
   2571			return -EINVAL;
   2572		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
   2573		ctrl &= ~SDHCI_CTRL_VDD_180;
   2574		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
   2575
   2576		if (!IS_ERR(mmc->supply.vqmmc)) {
   2577			ret = mmc_regulator_set_vqmmc(mmc, ios);
   2578			if (ret < 0) {
   2579				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
   2580					mmc_hostname(mmc));
   2581				return -EIO;
   2582			}
   2583		}
   2584		/* Wait for 5ms */
   2585		usleep_range(5000, 5500);
   2586
   2587		/* 3.3V regulator output should be stable within 5 ms */
   2588		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
   2589		if (!(ctrl & SDHCI_CTRL_VDD_180))
   2590			return 0;
   2591
   2592		pr_warn("%s: 3.3V regulator output did not become stable\n",
   2593			mmc_hostname(mmc));
   2594
   2595		return -EAGAIN;
   2596	case MMC_SIGNAL_VOLTAGE_180:
   2597		if (!(host->flags & SDHCI_SIGNALING_180))
   2598			return -EINVAL;
   2599		if (!IS_ERR(mmc->supply.vqmmc)) {
   2600			ret = mmc_regulator_set_vqmmc(mmc, ios);
   2601			if (ret < 0) {
   2602				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
   2603					mmc_hostname(mmc));
   2604				return -EIO;
   2605			}
   2606		}
   2607
   2608		/*
   2609		 * Enable 1.8V Signal Enable in the Host Control2
   2610		 * register
   2611		 */
   2612		ctrl |= SDHCI_CTRL_VDD_180;
   2613		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
   2614
    2615		/* Some controllers need to do more when switching */
   2616		if (host->ops->voltage_switch)
   2617			host->ops->voltage_switch(host);
   2618
   2619		/* 1.8V regulator output should be stable within 5 ms */
   2620		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
   2621		if (ctrl & SDHCI_CTRL_VDD_180)
   2622			return 0;
   2623
   2624		pr_warn("%s: 1.8V regulator output did not become stable\n",
   2625			mmc_hostname(mmc));
   2626
   2627		return -EAGAIN;
   2628	case MMC_SIGNAL_VOLTAGE_120:
   2629		if (!(host->flags & SDHCI_SIGNALING_120))
   2630			return -EINVAL;
   2631		if (!IS_ERR(mmc->supply.vqmmc)) {
   2632			ret = mmc_regulator_set_vqmmc(mmc, ios);
   2633			if (ret < 0) {
   2634				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
   2635					mmc_hostname(mmc));
   2636				return -EIO;
   2637			}
   2638		}
   2639		return 0;
   2640	default:
   2641		/* No signal voltage switch required */
   2642		return 0;
   2643	}
   2644}
   2645EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
   2646
   2647static int sdhci_card_busy(struct mmc_host *mmc)
   2648{
   2649	struct sdhci_host *host = mmc_priv(mmc);
   2650	u32 present_state;
   2651
   2652	/* Check whether DAT[0] is 0 */
   2653	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
   2654
   2655	return !(present_state & SDHCI_DATA_0_LVL_MASK);
   2656}
   2657
   2658static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
   2659{
   2660	struct sdhci_host *host = mmc_priv(mmc);
   2661	unsigned long flags;
   2662
   2663	spin_lock_irqsave(&host->lock, flags);
   2664	host->flags |= SDHCI_HS400_TUNING;
   2665	spin_unlock_irqrestore(&host->lock, flags);
   2666
   2667	return 0;
   2668}
   2669
   2670void sdhci_start_tuning(struct sdhci_host *host)
   2671{
   2672	u16 ctrl;
   2673
   2674	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
   2675	ctrl |= SDHCI_CTRL_EXEC_TUNING;
   2676	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
   2677		ctrl |= SDHCI_CTRL_TUNED_CLK;
   2678	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
   2679
   2680	/*
   2681	 * As per the Host Controller spec v3.00, tuning command
   2682	 * generates Buffer Read Ready interrupt, so enable that.
   2683	 *
   2684	 * Note: The spec clearly says that when tuning sequence
   2685	 * is being performed, the controller does not generate
   2686	 * interrupts other than Buffer Read Ready interrupt. But
   2687	 * to make sure we don't hit a controller bug, we _only_
   2688	 * enable Buffer Read Ready interrupt here.
   2689	 */
   2690	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
   2691	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
   2692}
   2693EXPORT_SYMBOL_GPL(sdhci_start_tuning);
   2694
   2695void sdhci_end_tuning(struct sdhci_host *host)
   2696{
   2697	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
   2698	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
   2699}
   2700EXPORT_SYMBOL_GPL(sdhci_end_tuning);
   2701
   2702void sdhci_reset_tuning(struct sdhci_host *host)
   2703{
   2704	u16 ctrl;
   2705
   2706	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
   2707	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
   2708	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
   2709	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
   2710}
   2711EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
   2712
   2713void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
   2714{
   2715	sdhci_reset_tuning(host);
   2716
   2717	sdhci_do_reset(host, SDHCI_RESET_CMD);
   2718	sdhci_do_reset(host, SDHCI_RESET_DATA);
   2719
   2720	sdhci_end_tuning(host);
   2721
   2722	mmc_send_abort_tuning(host->mmc, opcode);
   2723}
   2724EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
   2725
   2726/*
   2727 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
   2728 * tuning command does not have a data payload (or rather the hardware does it
   2729 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
   2730 * interrupt setup is different to other commands and there is no timeout
   2731 * interrupt so special handling is needed.
   2732 */
   2733void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
   2734{
   2735	struct mmc_host *mmc = host->mmc;
   2736	struct mmc_command cmd = {};
   2737	struct mmc_request mrq = {};
   2738	unsigned long flags;
   2739	u32 b = host->sdma_boundary;
   2740
   2741	spin_lock_irqsave(&host->lock, flags);
   2742
   2743	cmd.opcode = opcode;
   2744	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
   2745	cmd.mrq = &mrq;
   2746
   2747	mrq.cmd = &cmd;
   2748	/*
    2749	 * In response to CMD19, the card sends a 64-byte tuning block
    2750	 * to the Host Controller, so set the block size to 64 here
    2751	 * (128 bytes for CMD21 on an 8-bit bus, as handled below).
   2752	 */
   2753	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
   2754	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
   2755		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
   2756	else
   2757		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
   2758
   2759	/*
   2760	 * The tuning block is sent by the card to the host controller.
   2761	 * So we set the TRNS_READ bit in the Transfer Mode register.
   2762	 * This also takes care of setting DMA Enable and Multi Block
   2763	 * Select in the same register to 0.
   2764	 */
   2765	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
   2766
   2767	if (!sdhci_send_command_retry(host, &cmd, flags)) {
   2768		spin_unlock_irqrestore(&host->lock, flags);
   2769		host->tuning_done = 0;
   2770		return;
   2771	}
   2772
   2773	host->cmd = NULL;
   2774
   2775	sdhci_del_timer(host, &mrq);
   2776
   2777	host->tuning_done = 0;
   2778
   2779	spin_unlock_irqrestore(&host->lock, flags);
   2780
   2781	/* Wait for Buffer Read Ready interrupt */
   2782	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
   2783			   msecs_to_jiffies(50));
   2784
   2785}
   2786EXPORT_SYMBOL_GPL(sdhci_send_tuning);
   2787
   2788static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
   2789{
   2790	int i;
   2791
   2792	/*
    2793	 * Issue the opcode repeatedly until Execute Tuning is cleared to 0 or
    2794	 * the number of loops reaches the tuning loop count.
   2795	 */
   2796	for (i = 0; i < host->tuning_loop_count; i++) {
   2797		u16 ctrl;
   2798
   2799		sdhci_send_tuning(host, opcode);
   2800
   2801		if (!host->tuning_done) {
   2802			pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
   2803				 mmc_hostname(host->mmc));
   2804			sdhci_abort_tuning(host, opcode);
   2805			return -ETIMEDOUT;
   2806		}
   2807
   2808		/* Spec does not require a delay between tuning cycles */
   2809		if (host->tuning_delay > 0)
   2810			mdelay(host->tuning_delay);
   2811
   2812		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
   2813		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
   2814			if (ctrl & SDHCI_CTRL_TUNED_CLK)
   2815				return 0; /* Success! */
   2816			break;
   2817		}
   2818
   2819	}
   2820
   2821	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
   2822		mmc_hostname(host->mmc));
   2823	sdhci_reset_tuning(host);
   2824	return -EAGAIN;
   2825}
   2826
   2827int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
   2828{
   2829	struct sdhci_host *host = mmc_priv(mmc);
   2830	int err = 0;
   2831	unsigned int tuning_count = 0;
   2832	bool hs400_tuning;
   2833
   2834	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
   2835
   2836	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
   2837		tuning_count = host->tuning_count;
   2838
   2839	/*
   2840	 * The Host Controller needs tuning in case of SDR104 and DDR50
   2841	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
   2842	 * the Capabilities register.
   2843	 * If the Host Controller supports the HS200 mode then the
   2844	 * tuning function has to be executed.
   2845	 */
   2846	switch (host->timing) {
   2847	/* HS400 tuning is done in HS200 mode */
   2848	case MMC_TIMING_MMC_HS400:
   2849		err = -EINVAL;
   2850		goto out;
   2851
   2852	case MMC_TIMING_MMC_HS200:
   2853		/*
   2854		 * Periodic re-tuning for HS400 is not expected to be needed, so
   2855		 * disable it here.
   2856		 */
   2857		if (hs400_tuning)
   2858			tuning_count = 0;
   2859		break;
   2860
   2861	case MMC_TIMING_UHS_SDR104:
   2862	case MMC_TIMING_UHS_DDR50:
   2863		break;
   2864
   2865	case MMC_TIMING_UHS_SDR50:
   2866		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
   2867			break;
   2868		fallthrough;
   2869
   2870	default:
   2871		goto out;
   2872	}
   2873
   2874	if (host->ops->platform_execute_tuning) {
   2875		err = host->ops->platform_execute_tuning(host, opcode);
   2876		goto out;
   2877	}
   2878
   2879	mmc->retune_period = tuning_count;
   2880
   2881	if (host->tuning_delay < 0)
   2882		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
   2883
   2884	sdhci_start_tuning(host);
   2885
   2886	host->tuning_err = __sdhci_execute_tuning(host, opcode);
   2887
   2888	sdhci_end_tuning(host);
   2889out:
   2890	host->flags &= ~SDHCI_HS400_TUNING;
   2891
   2892	return err;
   2893}
   2894EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
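/*
 * Illustrative sketch (hypothetical driver, not compiled): a platform driver
 * can bypass the generic loop above by filling in .platform_execute_tuning
 * in its struct sdhci_ops; sdhci_execute_tuning() then delegates to it after
 * the timing checks. example_execute_tuning and example_sdhci_ops are
 * made-up names, not part of this file.
 */
#if 0
static int example_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	/* Vendor-specific tuning, e.g. sweeping an internal delay line. */
	return 0;
}

static const struct sdhci_ops example_sdhci_ops = {
	.platform_execute_tuning = example_execute_tuning,
	/* .set_clock, .set_bus_width, etc. as required by the core */
};
#endif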
   2895
   2896static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
   2897{
   2898	/* Host Controller v3.00 defines preset value registers */
   2899	if (host->version < SDHCI_SPEC_300)
   2900		return;
   2901
   2902	/*
    2903	 * Only enable or disable Preset Value if it is not already enabled
    2904	 * or disabled, respectively. Otherwise, bail out.
   2905	 */
   2906	if (host->preset_enabled != enable) {
   2907		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
   2908
   2909		if (enable)
   2910			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
   2911		else
   2912			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
   2913
   2914		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
   2915
   2916		if (enable)
   2917			host->flags |= SDHCI_PV_ENABLED;
   2918		else
   2919			host->flags &= ~SDHCI_PV_ENABLED;
   2920
   2921		host->preset_enabled = enable;
   2922	}
   2923}
   2924
   2925static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
   2926				int err)
   2927{
   2928	struct mmc_data *data = mrq->data;
   2929
   2930	if (data->host_cookie != COOKIE_UNMAPPED)
   2931		dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
   2932			     mmc_get_dma_dir(data));
   2933
   2934	data->host_cookie = COOKIE_UNMAPPED;
   2935}
   2936
   2937static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
   2938{
   2939	struct sdhci_host *host = mmc_priv(mmc);
   2940
   2941	mrq->data->host_cookie = COOKIE_UNMAPPED;
   2942
   2943	/*
    2944	 * No pre-mapping in the pre hook if we're using the bounce buffer;
    2945	 * for that we would need two bounce buffers, since one buffer is
    2946	 * in flight when this is called.
   2947	 */
   2948	if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
   2949		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
   2950}
   2951
   2952static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
   2953{
   2954	if (host->data_cmd) {
   2955		host->data_cmd->error = err;
   2956		sdhci_finish_mrq(host, host->data_cmd->mrq);
   2957	}
   2958
   2959	if (host->cmd) {
   2960		host->cmd->error = err;
   2961		sdhci_finish_mrq(host, host->cmd->mrq);
   2962	}
   2963}
   2964
   2965static void sdhci_card_event(struct mmc_host *mmc)
   2966{
   2967	struct sdhci_host *host = mmc_priv(mmc);
   2968	unsigned long flags;
   2969	int present;
   2970
   2971	/* First check if client has provided their own card event */
   2972	if (host->ops->card_event)
   2973		host->ops->card_event(host);
   2974
   2975	present = mmc->ops->get_cd(mmc);
   2976
   2977	spin_lock_irqsave(&host->lock, flags);
   2978
   2979	/* Check sdhci_has_requests() first in case we are runtime suspended */
   2980	if (sdhci_has_requests(host) && !present) {
   2981		pr_err("%s: Card removed during transfer!\n",
   2982			mmc_hostname(mmc));
   2983		pr_err("%s: Resetting controller.\n",
   2984			mmc_hostname(mmc));
   2985
   2986		sdhci_do_reset(host, SDHCI_RESET_CMD);
   2987		sdhci_do_reset(host, SDHCI_RESET_DATA);
   2988
   2989		sdhci_error_out_mrqs(host, -ENOMEDIUM);
   2990	}
   2991
   2992	spin_unlock_irqrestore(&host->lock, flags);
   2993}
   2994
   2995static const struct mmc_host_ops sdhci_ops = {
   2996	.request	= sdhci_request,
   2997	.post_req	= sdhci_post_req,
   2998	.pre_req	= sdhci_pre_req,
   2999	.set_ios	= sdhci_set_ios,
   3000	.get_cd		= sdhci_get_cd,
   3001	.get_ro		= sdhci_get_ro,
   3002	.card_hw_reset	= sdhci_hw_reset,
   3003	.enable_sdio_irq = sdhci_enable_sdio_irq,
   3004	.ack_sdio_irq    = sdhci_ack_sdio_irq,
   3005	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
   3006	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
   3007	.execute_tuning			= sdhci_execute_tuning,
   3008	.card_event			= sdhci_card_event,
   3009	.card_busy	= sdhci_card_busy,
   3010};
   3011
   3012/*****************************************************************************\
   3013 *                                                                           *
   3014 * Request done                                                              *
   3015 *                                                                           *
   3016\*****************************************************************************/
   3017
   3018static bool sdhci_request_done(struct sdhci_host *host)
   3019{
   3020	unsigned long flags;
   3021	struct mmc_request *mrq;
   3022	int i;
   3023
   3024	spin_lock_irqsave(&host->lock, flags);
   3025
   3026	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
   3027		mrq = host->mrqs_done[i];
   3028		if (mrq)
   3029			break;
   3030	}
   3031
   3032	if (!mrq) {
   3033		spin_unlock_irqrestore(&host->lock, flags);
   3034		return true;
   3035	}
   3036
   3037	/*
   3038	 * The controller needs a reset of internal state machines
   3039	 * upon error conditions.
   3040	 */
   3041	if (sdhci_needs_reset(host, mrq)) {
   3042		/*
   3043		 * Do not finish until command and data lines are available for
   3044		 * reset. Note there can only be one other mrq, so it cannot
   3045		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
   3046		 * would both be null.
   3047		 */
   3048		if (host->cmd || host->data_cmd) {
   3049			spin_unlock_irqrestore(&host->lock, flags);
   3050			return true;
   3051		}
   3052
   3053		/* Some controllers need this kick or reset won't work here */
   3054		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
   3055			/* This is to force an update */
   3056			host->ops->set_clock(host, host->clock);
   3057
   3058		/*
   3059		 * Spec says we should do both at the same time, but Ricoh
   3060		 * controllers do not like that.
   3061		 */
   3062		sdhci_do_reset(host, SDHCI_RESET_CMD);
   3063		sdhci_do_reset(host, SDHCI_RESET_DATA);
   3064
   3065		host->pending_reset = false;
   3066	}
   3067
   3068	/*
   3069	 * Always unmap the data buffers if they were mapped by
   3070	 * sdhci_prepare_data() whenever we finish with a request.
   3071	 * This avoids leaking DMA mappings on error.
   3072	 */
   3073	if (host->flags & SDHCI_REQ_USE_DMA) {
   3074		struct mmc_data *data = mrq->data;
   3075
   3076		if (host->use_external_dma && data &&
   3077		    (mrq->cmd->error || data->error)) {
   3078			struct dma_chan *chan = sdhci_external_dma_channel(host, data);
   3079
   3080			host->mrqs_done[i] = NULL;
   3081			spin_unlock_irqrestore(&host->lock, flags);
   3082			dmaengine_terminate_sync(chan);
   3083			spin_lock_irqsave(&host->lock, flags);
   3084			sdhci_set_mrq_done(host, mrq);
   3085		}
   3086
   3087		if (data && data->host_cookie == COOKIE_MAPPED) {
   3088			if (host->bounce_buffer) {
   3089				/*
   3090				 * On reads, copy the bounced data into the
   3091				 * sglist
   3092				 */
   3093				if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
   3094					unsigned int length = data->bytes_xfered;
   3095
   3096					if (length > host->bounce_buffer_size) {
   3097						pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
   3098						       mmc_hostname(host->mmc),
   3099						       host->bounce_buffer_size,
   3100						       data->bytes_xfered);
   3101						/* Cap it down and continue */
   3102						length = host->bounce_buffer_size;
   3103					}
   3104					dma_sync_single_for_cpu(
   3105						mmc_dev(host->mmc),
   3106						host->bounce_addr,
   3107						host->bounce_buffer_size,
   3108						DMA_FROM_DEVICE);
   3109					sg_copy_from_buffer(data->sg,
   3110						data->sg_len,
   3111						host->bounce_buffer,
   3112						length);
   3113				} else {
   3114					/* No copying, just switch ownership */
   3115					dma_sync_single_for_cpu(
   3116						mmc_dev(host->mmc),
   3117						host->bounce_addr,
   3118						host->bounce_buffer_size,
   3119						mmc_get_dma_dir(data));
   3120				}
   3121			} else {
   3122				/* Unmap the raw data */
   3123				dma_unmap_sg(mmc_dev(host->mmc), data->sg,
   3124					     data->sg_len,
   3125					     mmc_get_dma_dir(data));
   3126			}
   3127			data->host_cookie = COOKIE_UNMAPPED;
   3128		}
   3129	}
   3130
   3131	host->mrqs_done[i] = NULL;
   3132
   3133	spin_unlock_irqrestore(&host->lock, flags);
   3134
   3135	if (host->ops->request_done)
   3136		host->ops->request_done(host, mrq);
   3137	else
   3138		mmc_request_done(host->mmc, mrq);
   3139
   3140	return false;
   3141}
   3142
   3143static void sdhci_complete_work(struct work_struct *work)
   3144{
   3145	struct sdhci_host *host = container_of(work, struct sdhci_host,
   3146					       complete_work);
   3147
   3148	while (!sdhci_request_done(host))
   3149		;
   3150}
   3151
   3152static void sdhci_timeout_timer(struct timer_list *t)
   3153{
   3154	struct sdhci_host *host;
   3155	unsigned long flags;
   3156
   3157	host = from_timer(host, t, timer);
   3158
   3159	spin_lock_irqsave(&host->lock, flags);
   3160
   3161	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
   3162		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
   3163		       mmc_hostname(host->mmc));
   3164		sdhci_dumpregs(host);
   3165
   3166		host->cmd->error = -ETIMEDOUT;
   3167		sdhci_finish_mrq(host, host->cmd->mrq);
   3168	}
   3169
   3170	spin_unlock_irqrestore(&host->lock, flags);
   3171}
   3172
   3173static void sdhci_timeout_data_timer(struct timer_list *t)
   3174{
   3175	struct sdhci_host *host;
   3176	unsigned long flags;
   3177
   3178	host = from_timer(host, t, data_timer);
   3179
   3180	spin_lock_irqsave(&host->lock, flags);
   3181
   3182	if (host->data || host->data_cmd ||
   3183	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
   3184		pr_err("%s: Timeout waiting for hardware interrupt.\n",
   3185		       mmc_hostname(host->mmc));
   3186		sdhci_dumpregs(host);
   3187
   3188		if (host->data) {
   3189			host->data->error = -ETIMEDOUT;
   3190			__sdhci_finish_data(host, true);
   3191			queue_work(host->complete_wq, &host->complete_work);
   3192		} else if (host->data_cmd) {
   3193			host->data_cmd->error = -ETIMEDOUT;
   3194			sdhci_finish_mrq(host, host->data_cmd->mrq);
   3195		} else {
   3196			host->cmd->error = -ETIMEDOUT;
   3197			sdhci_finish_mrq(host, host->cmd->mrq);
   3198		}
   3199	}
   3200
   3201	spin_unlock_irqrestore(&host->lock, flags);
   3202}
   3203
   3204/*****************************************************************************\
   3205 *                                                                           *
   3206 * Interrupt handling                                                        *
   3207 *                                                                           *
   3208\*****************************************************************************/
   3209
   3210static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
   3211{
   3212	/* Handle auto-CMD12 error */
   3213	if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
   3214		struct mmc_request *mrq = host->data_cmd->mrq;
   3215		u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
   3216		int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
   3217				   SDHCI_INT_DATA_TIMEOUT :
   3218				   SDHCI_INT_DATA_CRC;
   3219
   3220		/* Treat auto-CMD12 error the same as data error */
   3221		if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
   3222			*intmask_p |= data_err_bit;
   3223			return;
   3224		}
   3225	}
   3226
   3227	if (!host->cmd) {
   3228		/*
   3229		 * SDHCI recovers from errors by resetting the cmd and data
   3230		 * circuits.  Until that is done, there very well might be more
   3231		 * interrupts, so ignore them in that case.
   3232		 */
   3233		if (host->pending_reset)
   3234			return;
   3235		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
   3236		       mmc_hostname(host->mmc), (unsigned)intmask);
   3237		sdhci_dumpregs(host);
   3238		return;
   3239	}
   3240
   3241	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
   3242		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
   3243		if (intmask & SDHCI_INT_TIMEOUT)
   3244			host->cmd->error = -ETIMEDOUT;
   3245		else
   3246			host->cmd->error = -EILSEQ;
   3247
   3248		/* Treat data command CRC error the same as data CRC error */
   3249		if (host->cmd->data &&
   3250		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
   3251		     SDHCI_INT_CRC) {
   3252			host->cmd = NULL;
   3253			*intmask_p |= SDHCI_INT_DATA_CRC;
   3254			return;
   3255		}
   3256
   3257		__sdhci_finish_mrq(host, host->cmd->mrq);
   3258		return;
   3259	}
   3260
   3261	/* Handle auto-CMD23 error */
   3262	if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
   3263		struct mmc_request *mrq = host->cmd->mrq;
   3264		u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
   3265		int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
   3266			  -ETIMEDOUT :
   3267			  -EILSEQ;
   3268
   3269		if (sdhci_auto_cmd23(host, mrq)) {
   3270			mrq->sbc->error = err;
   3271			__sdhci_finish_mrq(host, mrq);
   3272			return;
   3273		}
   3274	}
   3275
   3276	if (intmask & SDHCI_INT_RESPONSE)
   3277		sdhci_finish_command(host);
   3278}
   3279
   3280static void sdhci_adma_show_error(struct sdhci_host *host)
   3281{
   3282	void *desc = host->adma_table;
   3283	dma_addr_t dma = host->adma_addr;
   3284
   3285	sdhci_dumpregs(host);
   3286
   3287	while (true) {
   3288		struct sdhci_adma2_64_desc *dma_desc = desc;
   3289
   3290		if (host->flags & SDHCI_USE_64_BIT_DMA)
   3291			SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
   3292			    (unsigned long long)dma,
   3293			    le32_to_cpu(dma_desc->addr_hi),
   3294			    le32_to_cpu(dma_desc->addr_lo),
   3295			    le16_to_cpu(dma_desc->len),
   3296			    le16_to_cpu(dma_desc->cmd));
   3297		else
   3298			SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
   3299			    (unsigned long long)dma,
   3300			    le32_to_cpu(dma_desc->addr_lo),
   3301			    le16_to_cpu(dma_desc->len),
   3302			    le16_to_cpu(dma_desc->cmd));
   3303
   3304		desc += host->desc_sz;
   3305		dma += host->desc_sz;
   3306
   3307		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
   3308			break;
   3309	}
   3310}
   3311
   3312static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
   3313{
   3314	u32 command;
   3315
   3316	/*
    3317	 * CMD19 generates _only_ the Buffer Read Ready interrupt when
    3318	 * sdhci_send_tuning() is used.
    3319	 * We need to exclude the case of PIO mode with mmc_send_tuning():
    3320	 * there sdhci_transfer_pio() would never be called, leaving
    3321	 * SDHCI_INT_DATA_AVAIL permanently set and causing an irq storm.
   3322	 */
   3323	if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) {
   3324		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
   3325		if (command == MMC_SEND_TUNING_BLOCK ||
   3326		    command == MMC_SEND_TUNING_BLOCK_HS200) {
   3327			host->tuning_done = 1;
   3328			wake_up(&host->buf_ready_int);
   3329			return;
   3330		}
   3331	}
   3332
   3333	if (!host->data) {
   3334		struct mmc_command *data_cmd = host->data_cmd;
   3335
   3336		/*
   3337		 * The "data complete" interrupt is also used to
   3338		 * indicate that a busy state has ended. See comment
   3339		 * above in sdhci_cmd_irq().
   3340		 */
   3341		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
   3342			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
   3343				host->data_cmd = NULL;
   3344				data_cmd->error = -ETIMEDOUT;
   3345				__sdhci_finish_mrq(host, data_cmd->mrq);
   3346				return;
   3347			}
   3348			if (intmask & SDHCI_INT_DATA_END) {
   3349				host->data_cmd = NULL;
   3350				/*
   3351				 * Some cards handle busy-end interrupt
   3352				 * before the command completed, so make
   3353				 * sure we do things in the proper order.
   3354				 */
   3355				if (host->cmd == data_cmd)
   3356					return;
   3357
   3358				__sdhci_finish_mrq(host, data_cmd->mrq);
   3359				return;
   3360			}
   3361		}
   3362
   3363		/*
   3364		 * SDHCI recovers from errors by resetting the cmd and data
   3365		 * circuits. Until that is done, there very well might be more
   3366		 * interrupts, so ignore them in that case.
   3367		 */
   3368		if (host->pending_reset)
   3369			return;
   3370
   3371		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
   3372		       mmc_hostname(host->mmc), (unsigned)intmask);
   3373		sdhci_dumpregs(host);
   3374
   3375		return;
   3376	}
   3377
   3378	if (intmask & SDHCI_INT_DATA_TIMEOUT)
   3379		host->data->error = -ETIMEDOUT;
   3380	else if (intmask & SDHCI_INT_DATA_END_BIT)
   3381		host->data->error = -EILSEQ;
   3382	else if ((intmask & SDHCI_INT_DATA_CRC) &&
   3383		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
   3384			!= MMC_BUS_TEST_R)
   3385		host->data->error = -EILSEQ;
   3386	else if (intmask & SDHCI_INT_ADMA_ERROR) {
   3387		pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
   3388		       intmask);
   3389		sdhci_adma_show_error(host);
   3390		host->data->error = -EIO;
   3391		if (host->ops->adma_workaround)
   3392			host->ops->adma_workaround(host, intmask);
   3393	}
   3394
   3395	if (host->data->error)
   3396		sdhci_finish_data(host);
   3397	else {
   3398		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
   3399			sdhci_transfer_pio(host);
   3400
   3401		/*
   3402		 * We currently don't do anything fancy with DMA
   3403		 * boundaries, but as we can't disable the feature
   3404		 * we need to at least restart the transfer.
   3405		 *
   3406		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
   3407		 * should return a valid address to continue from, but as
   3408		 * some controllers are faulty, don't trust them.
   3409		 */
   3410		if (intmask & SDHCI_INT_DMA_END) {
   3411			dma_addr_t dmastart, dmanow;
   3412
   3413			dmastart = sdhci_sdma_address(host);
   3414			dmanow = dmastart + host->data->bytes_xfered;
   3415			/*
   3416			 * Force update to the next DMA block boundary.
   3417			 */
   3418			dmanow = (dmanow &
   3419				~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
   3420				SDHCI_DEFAULT_BOUNDARY_SIZE;
   3421			host->data->bytes_xfered = dmanow - dmastart;
   3422			DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
   3423			    &dmastart, host->data->bytes_xfered, &dmanow);
   3424			sdhci_set_sdma_addr(host, dmanow);
   3425		}
   3426
   3427		if (intmask & SDHCI_INT_DATA_END) {
   3428			if (host->cmd == host->data_cmd) {
   3429				/*
   3430				 * Data managed to finish before the
   3431				 * command completed. Make sure we do
   3432				 * things in the proper order.
   3433				 */
   3434				host->data_early = 1;
   3435			} else {
   3436				sdhci_finish_data(host);
   3437			}
   3438		}
   3439	}
   3440}
   3441
   3442static inline bool sdhci_defer_done(struct sdhci_host *host,
   3443				    struct mmc_request *mrq)
   3444{
   3445	struct mmc_data *data = mrq->data;
   3446
   3447	return host->pending_reset || host->always_defer_done ||
   3448	       ((host->flags & SDHCI_REQ_USE_DMA) && data &&
   3449		data->host_cookie == COOKIE_MAPPED);
   3450}
   3451
   3452static irqreturn_t sdhci_irq(int irq, void *dev_id)
   3453{
   3454	struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
   3455	irqreturn_t result = IRQ_NONE;
   3456	struct sdhci_host *host = dev_id;
   3457	u32 intmask, mask, unexpected = 0;
   3458	int max_loops = 16;
   3459	int i;
   3460
   3461	spin_lock(&host->lock);
   3462
   3463	if (host->runtime_suspended) {
   3464		spin_unlock(&host->lock);
   3465		return IRQ_NONE;
   3466	}
   3467
   3468	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
   3469	if (!intmask || intmask == 0xffffffff) {
   3470		result = IRQ_NONE;
   3471		goto out;
   3472	}
   3473
   3474	do {
   3475		DBG("IRQ status 0x%08x\n", intmask);
   3476
   3477		if (host->ops->irq) {
   3478			intmask = host->ops->irq(host, intmask);
   3479			if (!intmask)
   3480				goto cont;
   3481		}
   3482
   3483		/* Clear selected interrupts. */
   3484		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
   3485				  SDHCI_INT_BUS_POWER);
   3486		sdhci_writel(host, mask, SDHCI_INT_STATUS);
   3487
   3488		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
   3489			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
   3490				      SDHCI_CARD_PRESENT;
   3491
   3492			/*
   3493			 * There is an observation on i.MX eSDHC: the INSERT
   3494			 * bit will be set again immediately after it is
   3495			 * cleared if a card is inserted.  We have to mask
   3496			 * the irq to prevent an interrupt storm which would
   3497			 * freeze the system.  The REMOVE bit behaves the
   3498			 * same way.
   3499			 *
   3500			 * More testing is needed here to ensure it works
   3501			 * for other platforms though.
   3502			 */
   3503			host->ier &= ~(SDHCI_INT_CARD_INSERT |
   3504				       SDHCI_INT_CARD_REMOVE);
   3505			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
   3506					       SDHCI_INT_CARD_INSERT;
   3507			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
   3508			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
   3509
   3510			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
   3511				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
   3512
   3513			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
   3514						       SDHCI_INT_CARD_REMOVE);
   3515			result = IRQ_WAKE_THREAD;
   3516		}
   3517
   3518		if (intmask & SDHCI_INT_CMD_MASK)
   3519			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
   3520
   3521		if (intmask & SDHCI_INT_DATA_MASK)
   3522			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
   3523
   3524		if (intmask & SDHCI_INT_BUS_POWER)
   3525			pr_err("%s: Card is consuming too much power!\n",
   3526				mmc_hostname(host->mmc));
   3527
   3528		if (intmask & SDHCI_INT_RETUNE)
   3529			mmc_retune_needed(host->mmc);
   3530
   3531		if ((intmask & SDHCI_INT_CARD_INT) &&
   3532		    (host->ier & SDHCI_INT_CARD_INT)) {
   3533			sdhci_enable_sdio_irq_nolock(host, false);
   3534			sdio_signal_irq(host->mmc);
   3535		}
   3536
   3537		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
   3538			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
   3539			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
   3540			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
   3541
   3542		if (intmask) {
   3543			unexpected |= intmask;
   3544			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
   3545		}
   3546cont:
   3547		if (result == IRQ_NONE)
   3548			result = IRQ_HANDLED;
   3549
   3550		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
   3551	} while (intmask && --max_loops);
   3552
   3553	/* Determine if mrqs can be completed immediately */
   3554	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
   3555		struct mmc_request *mrq = host->mrqs_done[i];
   3556
   3557		if (!mrq)
   3558			continue;
   3559
   3560		if (sdhci_defer_done(host, mrq)) {
   3561			result = IRQ_WAKE_THREAD;
   3562		} else {
   3563			mrqs_done[i] = mrq;
   3564			host->mrqs_done[i] = NULL;
   3565		}
   3566	}
   3567out:
   3568	if (host->deferred_cmd)
   3569		result = IRQ_WAKE_THREAD;
   3570
   3571	spin_unlock(&host->lock);
   3572
   3573	/* Process mrqs ready for immediate completion */
   3574	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
   3575		if (!mrqs_done[i])
   3576			continue;
   3577
   3578		if (host->ops->request_done)
   3579			host->ops->request_done(host, mrqs_done[i]);
   3580		else
   3581			mmc_request_done(host->mmc, mrqs_done[i]);
   3582	}
   3583
   3584	if (unexpected) {
   3585		pr_err("%s: Unexpected interrupt 0x%08x.\n",
   3586			   mmc_hostname(host->mmc), unexpected);
   3587		sdhci_dumpregs(host);
   3588	}
   3589
   3590	return result;
   3591}
   3592
   3593static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
   3594{
   3595	struct sdhci_host *host = dev_id;
   3596	struct mmc_command *cmd;
   3597	unsigned long flags;
   3598	u32 isr;
   3599
   3600	while (!sdhci_request_done(host))
   3601		;
   3602
   3603	spin_lock_irqsave(&host->lock, flags);
   3604
   3605	isr = host->thread_isr;
   3606	host->thread_isr = 0;
   3607
   3608	cmd = host->deferred_cmd;
   3609	if (cmd && !sdhci_send_command_retry(host, cmd, flags))
   3610		sdhci_finish_mrq(host, cmd->mrq);
   3611
   3612	spin_unlock_irqrestore(&host->lock, flags);
   3613
   3614	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
   3615		struct mmc_host *mmc = host->mmc;
   3616
   3617		mmc->ops->card_event(mmc);
   3618		mmc_detect_change(mmc, msecs_to_jiffies(200));
   3619	}
   3620
   3621	return IRQ_HANDLED;
   3622}
   3623
   3624/*****************************************************************************\
   3625 *                                                                           *
   3626 * Suspend/resume                                                            *
   3627 *                                                                           *
   3628\*****************************************************************************/
   3629
   3630#ifdef CONFIG_PM
   3631
   3632static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
   3633{
   3634	return mmc_card_is_removable(host->mmc) &&
   3635	       !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
   3636	       !mmc_can_gpio_cd(host->mmc);
   3637}
   3638
   3639/*
   3640 * To enable wakeup events, the corresponding events have to be enabled in
   3641 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
   3642 * Table' in the SD Host Controller Standard Specification.
   3643 * It is useless to restore SDHCI_INT_ENABLE state in
   3644 * sdhci_disable_irq_wakeups() since it will be set by
   3645 * sdhci_enable_card_detection() or sdhci_init().
   3646 */
   3647static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
   3648{
   3649	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
   3650		  SDHCI_WAKE_ON_INT;
   3651	u32 irq_val = 0;
   3652	u8 wake_val = 0;
   3653	u8 val;
   3654
   3655	if (sdhci_cd_irq_can_wakeup(host)) {
   3656		wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
   3657		irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
   3658	}
   3659
   3660	if (mmc_card_wake_sdio_irq(host->mmc)) {
   3661		wake_val |= SDHCI_WAKE_ON_INT;
   3662		irq_val |= SDHCI_INT_CARD_INT;
   3663	}
   3664
   3665	if (!irq_val)
   3666		return false;
   3667
   3668	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
   3669	val &= ~mask;
   3670	val |= wake_val;
   3671	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
   3672
   3673	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
   3674
   3675	host->irq_wake_enabled = !enable_irq_wake(host->irq);
   3676
   3677	return host->irq_wake_enabled;
   3678}
   3679
   3680static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
   3681{
   3682	u8 val;
   3683	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
   3684			| SDHCI_WAKE_ON_INT;
   3685
   3686	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
   3687	val &= ~mask;
   3688	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
   3689
   3690	disable_irq_wake(host->irq);
   3691
   3692	host->irq_wake_enabled = false;
   3693}
   3694
   3695int sdhci_suspend_host(struct sdhci_host *host)
   3696{
   3697	sdhci_disable_card_detection(host);
   3698
   3699	mmc_retune_timer_stop(host->mmc);
   3700
   3701	if (!device_may_wakeup(mmc_dev(host->mmc)) ||
   3702	    !sdhci_enable_irq_wakeups(host)) {
   3703		host->ier = 0;
   3704		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
   3705		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
   3706		free_irq(host->irq, host);
   3707	}
   3708
   3709	return 0;
   3710}
   3711
   3712EXPORT_SYMBOL_GPL(sdhci_suspend_host);
   3713
   3714int sdhci_resume_host(struct sdhci_host *host)
   3715{
   3716	struct mmc_host *mmc = host->mmc;
   3717	int ret = 0;
   3718
   3719	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
   3720		if (host->ops->enable_dma)
   3721			host->ops->enable_dma(host);
   3722	}
   3723
   3724	if ((mmc->pm_flags & MMC_PM_KEEP_POWER) &&
   3725	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
   3726		/* Card keeps power but host controller does not */
   3727		sdhci_init(host, 0);
   3728		host->pwr = 0;
   3729		host->clock = 0;
   3730		mmc->ops->set_ios(mmc, &mmc->ios);
   3731	} else {
   3732		sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER));
   3733	}
   3734
   3735	if (host->irq_wake_enabled) {
   3736		sdhci_disable_irq_wakeups(host);
   3737	} else {
   3738		ret = request_threaded_irq(host->irq, sdhci_irq,
   3739					   sdhci_thread_irq, IRQF_SHARED,
   3740					   mmc_hostname(mmc), host);
   3741		if (ret)
   3742			return ret;
   3743	}
   3744
   3745	sdhci_enable_card_detection(host);
   3746
   3747	return ret;
   3748}
   3749
   3750EXPORT_SYMBOL_GPL(sdhci_resume_host);
   3751
   3752int sdhci_runtime_suspend_host(struct sdhci_host *host)
   3753{
   3754	unsigned long flags;
   3755
   3756	mmc_retune_timer_stop(host->mmc);
   3757
   3758	spin_lock_irqsave(&host->lock, flags);
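	/*
	 * Keep only the SDIO card interrupt enabled while runtime
	 * suspended, presumably so that an SDIO card IRQ can still be
	 * signalled.
	 */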
   3759	host->ier &= SDHCI_INT_CARD_INT;
   3760	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
   3761	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
   3762	spin_unlock_irqrestore(&host->lock, flags);
   3763
   3764	synchronize_hardirq(host->irq);
   3765
   3766	spin_lock_irqsave(&host->lock, flags);
   3767	host->runtime_suspended = true;
   3768	spin_unlock_irqrestore(&host->lock, flags);
   3769
   3770	return 0;
   3771}
   3772EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
   3773
   3774int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
   3775{
   3776	struct mmc_host *mmc = host->mmc;
   3777	unsigned long flags;
   3778	int host_flags = host->flags;
   3779
   3780	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
   3781		if (host->ops->enable_dma)
   3782			host->ops->enable_dma(host);
   3783	}
   3784
   3785	sdhci_init(host, soft_reset);
   3786
   3787	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
   3788	    mmc->ios.power_mode != MMC_POWER_OFF) {
   3789		/* Force clock and power re-program */
   3790		host->pwr = 0;
   3791		host->clock = 0;
   3792		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
   3793		mmc->ops->set_ios(mmc, &mmc->ios);
   3794
   3795		if ((host_flags & SDHCI_PV_ENABLED) &&
   3796		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
   3797			spin_lock_irqsave(&host->lock, flags);
   3798			sdhci_enable_preset_value(host, true);
   3799			spin_unlock_irqrestore(&host->lock, flags);
   3800		}
   3801
   3802		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
   3803		    mmc->ops->hs400_enhanced_strobe)
   3804			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
   3805	}
   3806
   3807	spin_lock_irqsave(&host->lock, flags);
   3808
   3809	host->runtime_suspended = false;
   3810
   3811	/* Enable SDIO IRQ */
   3812	if (sdio_irq_claimed(mmc))
   3813		sdhci_enable_sdio_irq_nolock(host, true);
   3814
   3815	/* Enable Card Detection */
   3816	sdhci_enable_card_detection(host);
   3817
   3818	spin_unlock_irqrestore(&host->lock, flags);
   3819
   3820	return 0;
   3821}
   3822EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
   3823
   3824#endif /* CONFIG_PM */
   3825
   3826/*****************************************************************************\
   3827 *                                                                           *
   3828 * Command Queue Engine (CQE) helpers                                        *
   3829 *                                                                           *
   3830\*****************************************************************************/
   3831
   3832void sdhci_cqe_enable(struct mmc_host *mmc)
   3833{
   3834	struct sdhci_host *host = mmc_priv(mmc);
   3835	unsigned long flags;
   3836	u8 ctrl;
   3837
   3838	spin_lock_irqsave(&host->lock, flags);
   3839
   3840	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
   3841	ctrl &= ~SDHCI_CTRL_DMA_MASK;
   3842	/*
   3843	 * Hosts from v4.10 support the ADMA3 DMA type.  ADMA3 uses
   3844	 * integrated descriptors, better suited to command queuing since
   3845	 * command and transfer descriptors can be fetched together.
   3846	 */
   3847	if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
   3848		ctrl |= SDHCI_CTRL_ADMA3;
   3849	else if (host->flags & SDHCI_USE_64_BIT_DMA)
   3850		ctrl |= SDHCI_CTRL_ADMA64;
   3851	else
   3852		ctrl |= SDHCI_CTRL_ADMA32;
   3853	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
   3854
   3855	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
   3856		     SDHCI_BLOCK_SIZE);
   3857
   3858	/* Set maximum timeout */
   3859	sdhci_set_timeout(host, NULL);
   3860
   3861	host->ier = host->cqe_ier;
   3862
   3863	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
   3864	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
   3865
   3866	host->cqe_on = true;
   3867
   3868	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
   3869		 mmc_hostname(mmc), host->ier,
   3870		 sdhci_readl(host, SDHCI_INT_STATUS));
   3871
   3872	spin_unlock_irqrestore(&host->lock, flags);
   3873}
   3874EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
   3875
   3876void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
   3877{
   3878	struct sdhci_host *host = mmc_priv(mmc);
   3879	unsigned long flags;
   3880
   3881	spin_lock_irqsave(&host->lock, flags);
   3882
   3883	sdhci_set_default_irqs(host);
   3884
   3885	host->cqe_on = false;
   3886
   3887	if (recovery) {
   3888		sdhci_do_reset(host, SDHCI_RESET_CMD);
   3889		sdhci_do_reset(host, SDHCI_RESET_DATA);
   3890	}
   3891
   3892	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
   3893		 mmc_hostname(mmc), host->ier,
   3894		 sdhci_readl(host, SDHCI_INT_STATUS));
   3895
   3896	spin_unlock_irqrestore(&host->lock, flags);
   3897}
   3898EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
   3899
   3900bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
   3901		   int *data_error)
   3902{
   3903	u32 mask;
   3904
   3905	if (!host->cqe_on)
   3906		return false;
   3907
   3908	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
   3909		*cmd_error = -EILSEQ;
   3910	else if (intmask & SDHCI_INT_TIMEOUT)
   3911		*cmd_error = -ETIMEDOUT;
   3912	else
   3913		*cmd_error = 0;
   3914
   3915	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
   3916		*data_error = -EILSEQ;
   3917	else if (intmask & SDHCI_INT_DATA_TIMEOUT)
   3918		*data_error = -ETIMEDOUT;
   3919	else if (intmask & SDHCI_INT_ADMA_ERROR)
   3920		*data_error = -EIO;
   3921	else
   3922		*data_error = 0;
   3923
   3924	/* Clear selected interrupts. */
   3925	mask = intmask & host->cqe_ier;
   3926	sdhci_writel(host, mask, SDHCI_INT_STATUS);
   3927
   3928	if (intmask & SDHCI_INT_BUS_POWER)
   3929		pr_err("%s: Card is consuming too much power!\n",
   3930		       mmc_hostname(host->mmc));
   3931
   3932	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
   3933	if (intmask) {
   3934		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
   3935		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
   3936		       mmc_hostname(host->mmc), intmask);
   3937		sdhci_dumpregs(host);
   3938	}
   3939
   3940	return true;
   3941}
   3942EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
   3943
   3944/*****************************************************************************\
   3945 *                                                                           *
   3946 * Device allocation/registration                                            *
   3947 *                                                                           *
   3948\*****************************************************************************/
   3949
   3950struct sdhci_host *sdhci_alloc_host(struct device *dev,
   3951	size_t priv_size)
   3952{
   3953	struct mmc_host *mmc;
   3954	struct sdhci_host *host;
   3955
   3956	WARN_ON(dev == NULL);
   3957
   3958	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
   3959	if (!mmc)
   3960		return ERR_PTR(-ENOMEM);
   3961
   3962	host = mmc_priv(mmc);
   3963	host->mmc = mmc;
   3964	host->mmc_host_ops = sdhci_ops;
   3965	mmc->ops = &host->mmc_host_ops;
   3966
   3967	host->flags = SDHCI_SIGNALING_330;
   3968
   3969	host->cqe_ier     = SDHCI_CQE_INT_MASK;
   3970	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
   3971
   3972	host->tuning_delay = -1;
   3973	host->tuning_loop_count = MAX_TUNING_LOOP;
   3974
   3975	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
   3976
   3977	/*
   3978	 * The DMA table descriptor count is calculated as the maximum
   3979	 * number of segments times 2, to allow for an alignment
   3980	 * descriptor for each segment, plus 1 for a nop end descriptor.
   3981	 */
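	/*
	 * For example, assuming SDHCI_MAX_SEGS is 128, this works out
	 * to 128 * 2 + 1 = 257 descriptors per ADMA table.
	 */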
   3982	host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
   3983	host->max_adma = 65536;
   3984
   3985	host->max_timeout_count = 0xE;
   3986
   3987	return host;
   3988}
   3989
   3990EXPORT_SYMBOL_GPL(sdhci_alloc_host);
   3991
   3992static int sdhci_set_dma_mask(struct sdhci_host *host)
   3993{
   3994	struct mmc_host *mmc = host->mmc;
   3995	struct device *dev = mmc_dev(mmc);
   3996	int ret = -EINVAL;
   3997
   3998	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
   3999		host->flags &= ~SDHCI_USE_64_BIT_DMA;
   4000
   4001	/* Try 64-bit mask if hardware is capable of it */
   4002	if (host->flags & SDHCI_USE_64_BIT_DMA) {
   4003		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
   4004		if (ret) {
   4005			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
   4006				mmc_hostname(mmc));
   4007			host->flags &= ~SDHCI_USE_64_BIT_DMA;
   4008		}
   4009	}
   4010
   4011	/* 32-bit mask as default & fallback */
   4012	if (ret) {
   4013		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
   4014		if (ret)
   4015			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
   4016				mmc_hostname(mmc));
   4017	}
   4018
   4019	return ret;
   4020}
   4021
   4022void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
   4023		       const u32 *caps, const u32 *caps1)
   4024{
   4025	u16 v;
   4026	u64 dt_caps_mask = 0;
   4027	u64 dt_caps = 0;
   4028
   4029	if (host->read_caps)
   4030		return;
   4031
   4032	host->read_caps = true;
   4033
   4034	if (debug_quirks)
   4035		host->quirks = debug_quirks;
   4036
   4037	if (debug_quirks2)
   4038		host->quirks2 = debug_quirks2;
   4039
   4040	sdhci_do_reset(host, SDHCI_RESET_ALL);
   4041
   4042	if (host->v4_mode)
   4043		sdhci_do_enable_v4_mode(host);
   4044
   4045	device_property_read_u64(mmc_dev(host->mmc),
   4046				 "sdhci-caps-mask", &dt_caps_mask);
   4047	device_property_read_u64(mmc_dev(host->mmc),
   4048				 "sdhci-caps", &dt_caps);
   4049
   4050	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
   4051	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
   4052
   4053	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
   4054		return;
   4055
   4056	if (caps) {
   4057		host->caps = *caps;
   4058	} else {
   4059		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
   4060		host->caps &= ~lower_32_bits(dt_caps_mask);
   4061		host->caps |= lower_32_bits(dt_caps);
   4062	}
   4063
   4064	if (host->version < SDHCI_SPEC_300)
   4065		return;
   4066
   4067	if (caps1) {
   4068		host->caps1 = *caps1;
   4069	} else {
   4070		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
   4071		host->caps1 &= ~upper_32_bits(dt_caps_mask);
   4072		host->caps1 |= upper_32_bits(dt_caps);
   4073	}
   4074}
   4075EXPORT_SYMBOL_GPL(__sdhci_read_caps);
   4076
   4077static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
   4078{
   4079	struct mmc_host *mmc = host->mmc;
   4080	unsigned int max_blocks;
   4081	unsigned int bounce_size;
   4082	int ret;
   4083
   4084	/*
   4085	 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
   4086	 * has diminishing returns, probably because SD/MMC cards are
   4087	 * usually optimized to handle requests of this size.
   4088	 */
   4089	bounce_size = SZ_64K;
   4090	/*
   4091	 * If the maximum request size is smaller than the bounce buffer,
   4092	 * shrink the bounce buffer to match; otherwise the bounce buffer
   4093	 * size becomes the limit on the maximum request size below.
   4094	 */
   4095	if (mmc->max_req_size < bounce_size)
   4096		bounce_size = mmc->max_req_size;
   4097	max_blocks = bounce_size / 512;
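	/*
	 * With the 64 KiB default this is 64 KiB / 512 = 128 blocks,
	 * which also becomes mmc->max_segs further down.
	 */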
   4098
   4099	/*
   4100	 * When we support just one segment, we can get significant
   4101	 * speedups with the help of a bounce buffer that groups
   4102	 * scattered reads/writes together.
   4103	 */
   4104	host->bounce_buffer = devm_kmalloc(mmc_dev(mmc),
   4105					   bounce_size,
   4106					   GFP_KERNEL);
   4107	if (!host->bounce_buffer) {
   4108		pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
   4109		       mmc_hostname(mmc),
   4110		       bounce_size);
   4111		/*
   4112		 * Returning here makes sure we proceed with
   4113		 * mmc->max_segs == 1.
   4114		 */
   4115		return;
   4116	}
   4117
   4118	host->bounce_addr = dma_map_single(mmc_dev(mmc),
   4119					   host->bounce_buffer,
   4120					   bounce_size,
   4121					   DMA_BIDIRECTIONAL);
   4122	ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr);
   4123	if (ret) {
   4124		devm_kfree(mmc_dev(mmc), host->bounce_buffer);
   4125		host->bounce_buffer = NULL;
   4126		/* Again fall back to max_segs == 1 */
   4127		return;
   4128	}
   4129
   4130	host->bounce_buffer_size = bounce_size;
   4131
   4132	/* Lie about this since we're bouncing */
   4133	mmc->max_segs = max_blocks;
   4134	mmc->max_seg_size = bounce_size;
   4135	mmc->max_req_size = bounce_size;
   4136
   4137	pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
   4138		mmc_hostname(mmc), max_blocks, bounce_size);
   4139}
   4140
   4141static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
   4142{
   4143	/*
   4144	 * According to the SD Host Controller spec v4.10, bit[27] of the
   4145	 * Capabilities Register (added in version 4.10) indicates 64-bit
   4146	 * System Address support for V4 mode.
   4147	 */
   4148	if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
   4149		return host->caps & SDHCI_CAN_64BIT_V4;
   4150
   4151	return host->caps & SDHCI_CAN_64BIT;
   4152}
   4153
   4154int sdhci_setup_host(struct sdhci_host *host)
   4155{
   4156	struct mmc_host *mmc;
   4157	u32 max_current_caps;
   4158	unsigned int ocr_avail;
   4159	unsigned int override_timeout_clk;
   4160	u32 max_clk;
   4161	int ret = 0;
   4162	bool enable_vqmmc = false;
   4163
   4164	WARN_ON(host == NULL);
   4165	if (host == NULL)
   4166		return -EINVAL;
   4167
   4168	mmc = host->mmc;
   4169
   4170	/*
   4171	 * If there are external regulators, get them. Note this must be done
   4172	 * early before resetting the host and reading the capabilities so that
   4173	 * the host can take the appropriate action if regulators are not
   4174	 * available.
   4175	 */
   4176	if (!mmc->supply.vqmmc) {
   4177		ret = mmc_regulator_get_supply(mmc);
   4178		if (ret)
   4179			return ret;
   4180		enable_vqmmc  = true;
   4181	}
   4182
   4183	DBG("Version:   0x%08x | Present:  0x%08x\n",
   4184	    sdhci_readw(host, SDHCI_HOST_VERSION),
   4185	    sdhci_readl(host, SDHCI_PRESENT_STATE));
   4186	DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
   4187	    sdhci_readl(host, SDHCI_CAPABILITIES),
   4188	    sdhci_readl(host, SDHCI_CAPABILITIES_1));
   4189
   4190	sdhci_read_caps(host);
   4191
   4192	override_timeout_clk = host->timeout_clk;
   4193
   4194	if (host->version > SDHCI_SPEC_420) {
   4195		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
   4196		       mmc_hostname(mmc), host->version);
   4197	}
   4198
   4199	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
   4200		host->flags |= SDHCI_USE_SDMA;
   4201	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
   4202		DBG("Controller doesn't have SDMA capability\n");
   4203	else
   4204		host->flags |= SDHCI_USE_SDMA;
   4205
   4206	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
   4207		(host->flags & SDHCI_USE_SDMA)) {
   4208		DBG("Disabling DMA as it is marked broken\n");
   4209		host->flags &= ~SDHCI_USE_SDMA;
   4210	}
   4211
   4212	if ((host->version >= SDHCI_SPEC_200) &&
   4213		(host->caps & SDHCI_CAN_DO_ADMA2))
   4214		host->flags |= SDHCI_USE_ADMA;
   4215
   4216	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
   4217		(host->flags & SDHCI_USE_ADMA)) {
   4218		DBG("Disabling ADMA as it is marked broken\n");
   4219		host->flags &= ~SDHCI_USE_ADMA;
   4220	}
   4221
   4222	if (sdhci_can_64bit_dma(host))
   4223		host->flags |= SDHCI_USE_64_BIT_DMA;
   4224
   4225	if (host->use_external_dma) {
   4226		ret = sdhci_external_dma_init(host);
   4227		if (ret == -EPROBE_DEFER)
   4228			goto unreg;
   4229		/*
   4230		 * Fall back to use the DMA/PIO integrated in standard SDHCI
   4231		 * instead of external DMA devices.
   4232		 */
   4233		else if (ret)
   4234			sdhci_switch_external_dma(host, false);
   4235		/* Disable internal DMA sources */
   4236		else
   4237			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
   4238	}
   4239
   4240	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
   4241		if (host->ops->set_dma_mask)
   4242			ret = host->ops->set_dma_mask(host);
   4243		else
   4244			ret = sdhci_set_dma_mask(host);
   4245
   4246		if (!ret && host->ops->enable_dma)
   4247			ret = host->ops->enable_dma(host);
   4248
   4249		if (ret) {
   4250			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
   4251				mmc_hostname(mmc));
   4252			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
   4253
   4254			ret = 0;
   4255		}
   4256	}
   4257
   4258	/* SDMA does not support 64-bit DMA if v4 mode is not set */
   4259	if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
   4260		host->flags &= ~SDHCI_USE_SDMA;
   4261
   4262	if (host->flags & SDHCI_USE_ADMA) {
   4263		dma_addr_t dma;
   4264		void *buf;
   4265
   4266		if (!(host->flags & SDHCI_USE_64_BIT_DMA))
   4267			host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
   4268		else if (!host->alloc_desc_sz)
   4269			host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
   4270
   4271		host->desc_sz = host->alloc_desc_sz;
   4272		host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
   4273
   4274		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
   4275		/*
   4276		 * dma_alloc_coherent() returns zeroed memory, so the reserved
   4277		 * high 32-bits of 128-bit descriptors never need to be written.
   4278		 */
   4279		buf = dma_alloc_coherent(mmc_dev(mmc),
   4280					 host->align_buffer_sz + host->adma_table_sz,
   4281					 &dma, GFP_KERNEL);
   4282		if (!buf) {
   4283			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
   4284				mmc_hostname(mmc));
   4285			host->flags &= ~SDHCI_USE_ADMA;
   4286		} else if ((dma + host->align_buffer_sz) &
   4287			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
   4288			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
   4289				mmc_hostname(mmc));
   4290			host->flags &= ~SDHCI_USE_ADMA;
   4291			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
   4292					  host->adma_table_sz, buf, dma);
   4293		} else {
   4294			host->align_buffer = buf;
   4295			host->align_addr = dma;
   4296
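			/*
			 * The single coherent allocation is laid out as the
			 * align buffer followed by the ADMA descriptor table,
			 * hence the align_buffer_sz offsets below.
			 */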
   4297			host->adma_table = buf + host->align_buffer_sz;
   4298			host->adma_addr = dma + host->align_buffer_sz;
   4299		}
   4300	}
   4301
   4302	/*
   4303	 * If we use DMA, then it's up to the caller to set the DMA
   4304	 * mask, but PIO does not need the hw shim so we set a new
   4305	 * mask here in that case.
   4306	 */
   4307	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
   4308		host->dma_mask = DMA_BIT_MASK(64);
   4309		mmc_dev(mmc)->dma_mask = &host->dma_mask;
   4310	}
   4311
   4312	if (host->version >= SDHCI_SPEC_300)
   4313		host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
   4314	else
   4315		host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);
   4316
   4317	host->max_clk *= 1000000;
   4318	if (host->max_clk == 0 || host->quirks &
   4319			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
   4320		if (!host->ops->get_max_clock) {
   4321			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
   4322			       mmc_hostname(mmc));
   4323			ret = -ENODEV;
   4324			goto undma;
   4325		}
   4326		host->max_clk = host->ops->get_max_clock(host);
   4327	}
   4328
   4329	/*
   4330	 * In case of Host Controller v3.00, find out whether clock
   4331	 * multiplier is supported.
   4332	 */
   4333	host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);
   4334
   4335	/*
   4336	 * If the value in Clock Multiplier is 0, programmable clock
   4337	 * mode is not supported; otherwise the actual clock multiplier
   4338	 * is one more than the value of Clock Multiplier in the
   4339	 * Capabilities Register.
   4340	 */
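	/*
	 * For example, a Clock Multiplier field value of 9 means an
	 * effective multiplier of 10, while 0 leaves clk_mul at 0 and
	 * programmable clock mode unused.
	 */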
   4341	if (host->clk_mul)
   4342		host->clk_mul += 1;
   4343
   4344	/*
   4345	 * Set host parameters.
   4346	 */
   4347	max_clk = host->max_clk;
   4348
   4349	if (host->ops->get_min_clock)
   4350		mmc->f_min = host->ops->get_min_clock(host);
   4351	else if (host->version >= SDHCI_SPEC_300) {
   4352		if (host->clk_mul)
   4353			max_clk = host->max_clk * host->clk_mul;
   4354		/*
   4355		 * Divided Clock Mode minimum clock rate is always less than
   4356		 * Programmable Clock Mode minimum clock rate.
   4357		 */
   4358		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
   4359	} else
   4360		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
   4361
   4362	if (!mmc->f_max || mmc->f_max > max_clk)
   4363		mmc->f_max = max_clk;
   4364
   4365	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
   4366		host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);
   4367
   4368		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
   4369			host->timeout_clk *= 1000;
   4370
   4371		if (host->timeout_clk == 0) {
   4372			if (!host->ops->get_timeout_clock) {
   4373				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
   4374					mmc_hostname(mmc));
   4375				ret = -ENODEV;
   4376				goto undma;
   4377			}
   4378
   4379			host->timeout_clk =
   4380				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
   4381					     1000);
   4382		}
   4383
   4384		if (override_timeout_clk)
   4385			host->timeout_clk = override_timeout_clk;
   4386
   4387		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
   4388			host->ops->get_max_timeout_count(host) : 1 << 27;
   4389		mmc->max_busy_timeout /= host->timeout_clk;
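		/*
		 * A rough worked example (values assumed): with the default
		 * 1 << 27 maximum count and a 48 MHz timeout clock
		 * (timeout_clk == 48000, i.e. kHz), this gives
		 * 134217728 / 48000 ~= 2796 ms of maximum busy timeout.
		 */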
   4390	}
   4391
   4392	if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
   4393	    !host->ops->get_max_timeout_count)
   4394		mmc->max_busy_timeout = 0;
   4395
   4396	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
   4397	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
   4398
   4399	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
   4400		host->flags |= SDHCI_AUTO_CMD12;
   4401
   4402	/*
   4403	 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
   4404	 * For v4 mode, SDMA may use Auto-CMD23 as well.
   4405	 */
   4406	if ((host->version >= SDHCI_SPEC_300) &&
   4407	    ((host->flags & SDHCI_USE_ADMA) ||
   4408	     !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
   4409	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
   4410		host->flags |= SDHCI_AUTO_CMD23;
   4411		DBG("Auto-CMD23 available\n");
   4412	} else {
   4413		DBG("Auto-CMD23 unavailable\n");
   4414	}
   4415
   4416	/*
   4417	 * A controller may support 8-bit width, but the board itself
   4418	 * might not have the pins brought out.  Boards that support
   4419	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
   4420	 * their platform code before calling sdhci_add_host(), and we
   4421	 * won't assume 8-bit width for hosts without that CAP.
   4422	 */
   4423	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
   4424		mmc->caps |= MMC_CAP_4_BIT_DATA;
   4425
   4426	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
   4427		mmc->caps &= ~MMC_CAP_CMD23;
   4428
   4429	if (host->caps & SDHCI_CAN_DO_HISPD)
   4430		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
   4431
   4432	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
   4433	    mmc_card_is_removable(mmc) &&
   4434	    mmc_gpio_get_cd(mmc) < 0)
   4435		mmc->caps |= MMC_CAP_NEEDS_POLL;
   4436
   4437	if (!IS_ERR(mmc->supply.vqmmc)) {
   4438		if (enable_vqmmc) {
   4439			ret = regulator_enable(mmc->supply.vqmmc);
   4440			host->sdhci_core_to_disable_vqmmc = !ret;
   4441		}
   4442
   4443		/* If vqmmc provides no 1.8V signalling, then there's no UHS */
   4444		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
   4445						    1950000))
   4446			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
   4447					 SDHCI_SUPPORT_SDR50 |
   4448					 SDHCI_SUPPORT_DDR50);
   4449
   4450		/* In eMMC case vqmmc might be a fixed 1.8V regulator */
   4451		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
   4452						    3600000))
   4453			host->flags &= ~SDHCI_SIGNALING_330;
   4454
   4455		if (ret) {
   4456			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
   4457				mmc_hostname(mmc), ret);
   4458			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
   4459		}
   4460
   4461	}
   4462
   4463	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
   4464		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
   4465				 SDHCI_SUPPORT_DDR50);
   4466		/*
   4467		 * The SDHCI controller in a SoC might support HS200/HS400
   4468		 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
   4469		 * but if the board is modeled such that the IO lines are not
   4470		 * connected to 1.8v then HS200/HS400 cannot be supported.
   4471		 * Disable HS200/HS400 if the board does not have 1.8v connected
   4472		 * to the IO lines. (Applicable for other modes in 1.8v)
   4473		 */
   4474		mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
   4475		mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
   4476	}
   4477
   4478	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
   4479	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
   4480			   SDHCI_SUPPORT_DDR50))
   4481		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
   4482
   4483	/* SDR104 support also implies SDR50 support */
   4484	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
   4485		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
   4486		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
   4487		 * field can be promoted to support HS200.
   4488		 */
   4489		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
   4490			mmc->caps2 |= MMC_CAP2_HS200;
   4491	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
   4492		mmc->caps |= MMC_CAP_UHS_SDR50;
   4493	}
   4494
   4495	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
   4496	    (host->caps1 & SDHCI_SUPPORT_HS400))
   4497		mmc->caps2 |= MMC_CAP2_HS400;
   4498
   4499	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
   4500	    (IS_ERR(mmc->supply.vqmmc) ||
   4501	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
   4502					     1300000)))
   4503		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
   4504
   4505	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
   4506	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
   4507		mmc->caps |= MMC_CAP_UHS_DDR50;
   4508
   4509	/* Does the host need tuning for SDR50? */
   4510	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
   4511		host->flags |= SDHCI_SDR50_NEEDS_TUNING;
   4512
   4513	/* Driver Type(s) (A, C, D) supported by the host */
   4514	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
   4515		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
   4516	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
   4517		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
   4518	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
   4519		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
   4520
   4521	/* Initial value for re-tuning timer count */
   4522	host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
   4523				       host->caps1);
   4524
   4525	/*
   4526	 * If the Re-tuning Timer is not disabled, the actual re-tuning
   4527	 * timer value is 2 ^ (n - 1).
   4528	 */
   4529	if (host->tuning_count)
   4530		host->tuning_count = 1 << (host->tuning_count - 1);
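	/*
	 * For example, a raw Re-tuning Timer Count field value of 4
	 * yields a re-tuning interval of 1 << 3 = 8 (the SD Host
	 * Controller spec counts these in seconds).
	 */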
   4531
   4532	/* Re-tuning mode supported by the Host Controller */
   4533	host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);
   4534
   4535	ocr_avail = 0;
   4536
   4537	/*
   4538	 * According to the SD Host Controller spec v3.00, if the Host System
   4539	 * can supply more than 150 mA, the Host Driver should set XPC to 1.
   4540	 * The value is meaningful only if Voltage Support in the Capabilities
   4541	 * register is set. The actual current value is 4 times the register
   4542	 * value.
   4543	 */
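	/*
	 * A worked example (values assumed): a vmmc regulator limited to
	 * 600000 uA becomes 600 mA, i.e. 600 / 4 = 150 register units,
	 * which decodes back to 150 * SDHCI_MAX_CURRENT_MULTIPLIER =
	 * 600 mA for each voltage range below.
	 */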
   4544	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
   4545	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
   4546		int curr = regulator_get_current_limit(mmc->supply.vmmc);
   4547		if (curr > 0) {
   4548
   4549			/* convert to SDHCI_MAX_CURRENT format */
   4550			curr = curr/1000;  /* convert to mA */
   4551			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
   4552
   4553			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
   4554			max_current_caps =
   4555				FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
   4556				FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
   4557				FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
   4558		}
   4559	}
   4560
   4561	if (host->caps & SDHCI_CAN_VDD_330) {
   4562		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
   4563
   4564		mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
   4565						 max_current_caps) *
   4566						SDHCI_MAX_CURRENT_MULTIPLIER;
   4567	}
   4568	if (host->caps & SDHCI_CAN_VDD_300) {
   4569		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
   4570
   4571		mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
   4572						 max_current_caps) *
   4573						SDHCI_MAX_CURRENT_MULTIPLIER;
   4574	}
   4575	if (host->caps & SDHCI_CAN_VDD_180) {
   4576		ocr_avail |= MMC_VDD_165_195;
   4577
   4578		mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
   4579						 max_current_caps) *
   4580						SDHCI_MAX_CURRENT_MULTIPLIER;
   4581	}
   4582
   4583	/* If OCR set by host, use it instead. */
   4584	if (host->ocr_mask)
   4585		ocr_avail = host->ocr_mask;
   4586
   4587	/* If OCR set by external regulators, give it highest prio. */
   4588	if (mmc->ocr_avail)
   4589		ocr_avail = mmc->ocr_avail;
   4590
   4591	mmc->ocr_avail = ocr_avail;
   4592	mmc->ocr_avail_sdio = ocr_avail;
   4593	if (host->ocr_avail_sdio)
   4594		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
   4595	mmc->ocr_avail_sd = ocr_avail;
   4596	if (host->ocr_avail_sd)
   4597		mmc->ocr_avail_sd &= host->ocr_avail_sd;
   4598	else /* normal SD controllers don't support 1.8V */
   4599		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
   4600	mmc->ocr_avail_mmc = ocr_avail;
   4601	if (host->ocr_avail_mmc)
   4602		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
   4603
   4604	if (mmc->ocr_avail == 0) {
   4605		pr_err("%s: Hardware doesn't report any supported voltages.\n",
   4606		       mmc_hostname(mmc));
   4607		ret = -ENODEV;
   4608		goto unreg;
   4609	}
   4610
   4611	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
   4612			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
   4613			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
   4614	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
   4615		host->flags |= SDHCI_SIGNALING_180;
   4616
   4617	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
   4618		host->flags |= SDHCI_SIGNALING_120;
   4619
   4620	spin_lock_init(&host->lock);
   4621
   4622	/*
   4623	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
   4624	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
   4625	 * is less anyway.
   4626	 */
   4627	mmc->max_req_size = 524288;
   4628
   4629	/*
   4630	 * Maximum number of segments. Depends on if the hardware
   4631	 * can do scatter/gather or not.
   4632	 */
   4633	if (host->flags & SDHCI_USE_ADMA) {
   4634		mmc->max_segs = SDHCI_MAX_SEGS;
   4635	} else if (host->flags & SDHCI_USE_SDMA) {
   4636		mmc->max_segs = 1;
   4637		mmc->max_req_size = min_t(size_t, mmc->max_req_size,
   4638					  dma_max_mapping_size(mmc_dev(mmc)));
   4639	} else { /* PIO */
   4640		mmc->max_segs = SDHCI_MAX_SEGS;
   4641	}
   4642
   4643	/*
   4644	 * Maximum segment size. Could be one segment with the maximum number
   4645	 * of bytes. When doing hardware scatter/gather, each entry cannot
   4646	 * be larger than 64 KiB though.
   4647	 */
   4648	if (host->flags & SDHCI_USE_ADMA) {
   4649		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
   4650			host->max_adma = 65532; /* 32-bit alignment */
   4651			mmc->max_seg_size = 65535;
   4652		} else {
   4653			mmc->max_seg_size = 65536;
   4654		}
   4655	} else {
   4656		mmc->max_seg_size = mmc->max_req_size;
   4657	}
   4658
   4659	/*
   4660	 * Maximum block size. This varies from controller to controller and
   4661	 * is specified in the capabilities register.
   4662	 */
   4663	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
   4664		mmc->max_blk_size = 2;
   4665	} else {
   4666		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
   4667				SDHCI_MAX_BLOCK_SHIFT;
   4668		if (mmc->max_blk_size >= 3) {
   4669			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
   4670				mmc_hostname(mmc));
   4671			mmc->max_blk_size = 0;
   4672		}
   4673	}
   4674
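	/*
	 * The capability field value n maps to a 512 << n byte maximum
	 * block size, i.e. 0, 1 or 2 gives 512, 1024 or 2048 bytes
	 * (values of 3 and above were rejected as invalid above).
	 */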
   4675	mmc->max_blk_size = 512 << mmc->max_blk_size;
   4676
   4677	/*
   4678	 * Maximum block count.
   4679	 */
   4680	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
   4681
   4682	if (mmc->max_segs == 1)
   4683		/* This may alter mmc->*_blk_* parameters */
   4684		sdhci_allocate_bounce_buffer(host);
   4685
   4686	return 0;
   4687
   4688unreg:
   4689	if (host->sdhci_core_to_disable_vqmmc)
   4690		regulator_disable(mmc->supply.vqmmc);
   4691undma:
   4692	if (host->align_buffer)
   4693		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
   4694				  host->adma_table_sz, host->align_buffer,
   4695				  host->align_addr);
   4696	host->adma_table = NULL;
   4697	host->align_buffer = NULL;
   4698
   4699	return ret;
   4700}
   4701EXPORT_SYMBOL_GPL(sdhci_setup_host);
   4702
   4703void sdhci_cleanup_host(struct sdhci_host *host)
   4704{
   4705	struct mmc_host *mmc = host->mmc;
   4706
   4707	if (host->sdhci_core_to_disable_vqmmc)
   4708		regulator_disable(mmc->supply.vqmmc);
   4709
   4710	if (host->align_buffer)
   4711		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
   4712				  host->adma_table_sz, host->align_buffer,
   4713				  host->align_addr);
   4714
   4715	if (host->use_external_dma)
   4716		sdhci_external_dma_release(host);
   4717
   4718	host->adma_table = NULL;
   4719	host->align_buffer = NULL;
   4720}
   4721EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
   4722
   4723int __sdhci_add_host(struct sdhci_host *host)
   4724{
   4725	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
   4726	struct mmc_host *mmc = host->mmc;
   4727	int ret;
   4728
   4729	if ((mmc->caps2 & MMC_CAP2_CQE) &&
   4730	    (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
   4731		mmc->caps2 &= ~MMC_CAP2_CQE;
   4732		mmc->cqe_ops = NULL;
   4733	}
   4734
   4735	host->complete_wq = alloc_workqueue("sdhci", flags, 0);
   4736	if (!host->complete_wq)
   4737		return -ENOMEM;
   4738
   4739	INIT_WORK(&host->complete_work, sdhci_complete_work);
   4740
   4741	timer_setup(&host->timer, sdhci_timeout_timer, 0);
   4742	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
   4743
   4744	init_waitqueue_head(&host->buf_ready_int);
   4745
   4746	sdhci_init(host, 0);
   4747
   4748	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
   4749				   IRQF_SHARED,	mmc_hostname(mmc), host);
   4750	if (ret) {
   4751		pr_err("%s: Failed to request IRQ %d: %d\n",
   4752		       mmc_hostname(mmc), host->irq, ret);
   4753		goto unwq;
   4754	}
   4755
   4756	ret = sdhci_led_register(host);
   4757	if (ret) {
   4758		pr_err("%s: Failed to register LED device: %d\n",
   4759		       mmc_hostname(mmc), ret);
   4760		goto unirq;
   4761	}
   4762
   4763	ret = mmc_add_host(mmc);
   4764	if (ret)
   4765		goto unled;
   4766
   4767	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
   4768		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
   4769		host->use_external_dma ? "External DMA" :
   4770		(host->flags & SDHCI_USE_ADMA) ?
   4771		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
   4772		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
   4773
   4774	sdhci_enable_card_detection(host);
   4775
   4776	return 0;
   4777
   4778unled:
   4779	sdhci_led_unregister(host);
   4780unirq:
   4781	sdhci_do_reset(host, SDHCI_RESET_ALL);
   4782	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
   4783	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
   4784	free_irq(host->irq, host);
   4785unwq:
   4786	destroy_workqueue(host->complete_wq);
   4787
   4788	return ret;
   4789}
   4790EXPORT_SYMBOL_GPL(__sdhci_add_host);
   4791
   4792int sdhci_add_host(struct sdhci_host *host)
   4793{
   4794	int ret;
   4795
   4796	ret = sdhci_setup_host(host);
   4797	if (ret)
   4798		return ret;
   4799
   4800	ret = __sdhci_add_host(host);
   4801	if (ret)
   4802		goto cleanup;
   4803
   4804	return 0;
   4805
   4806cleanup:
   4807	sdhci_cleanup_host(host);
   4808
   4809	return ret;
   4810}
   4811EXPORT_SYMBOL_GPL(sdhci_add_host);
   4812
   4813void sdhci_remove_host(struct sdhci_host *host, int dead)
   4814{
   4815	struct mmc_host *mmc = host->mmc;
   4816	unsigned long flags;
   4817
   4818	if (dead) {
   4819		spin_lock_irqsave(&host->lock, flags);
   4820
   4821		host->flags |= SDHCI_DEVICE_DEAD;
   4822
   4823		if (sdhci_has_requests(host)) {
   4824			pr_err("%s: Controller removed during transfer!\n",
   4825			       mmc_hostname(mmc));
   4826			sdhci_error_out_mrqs(host, -ENOMEDIUM);
   4827		}
   4828
   4829		spin_unlock_irqrestore(&host->lock, flags);
   4830	}
   4831
   4832	sdhci_disable_card_detection(host);
   4833
   4834	mmc_remove_host(mmc);
   4835
   4836	sdhci_led_unregister(host);
   4837
   4838	if (!dead)
   4839		sdhci_do_reset(host, SDHCI_RESET_ALL);
   4840
   4841	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
   4842	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
   4843	free_irq(host->irq, host);
   4844
   4845	del_timer_sync(&host->timer);
   4846	del_timer_sync(&host->data_timer);
   4847
   4848	destroy_workqueue(host->complete_wq);
   4849
   4850	if (host->sdhci_core_to_disable_vqmmc)
   4851		regulator_disable(mmc->supply.vqmmc);
   4852
   4853	if (host->align_buffer)
   4854		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
   4855				  host->adma_table_sz, host->align_buffer,
   4856				  host->align_addr);
   4857
   4858	if (host->use_external_dma)
   4859		sdhci_external_dma_release(host);
   4860
   4861	host->adma_table = NULL;
   4862	host->align_buffer = NULL;
   4863}
   4864
   4865EXPORT_SYMBOL_GPL(sdhci_remove_host);
   4866
   4867void sdhci_free_host(struct sdhci_host *host)
   4868{
   4869	mmc_free_host(host->mmc);
   4870}
   4871
   4872EXPORT_SYMBOL_GPL(sdhci_free_host);
   4873
   4874/*****************************************************************************\
   4875 *                                                                           *
   4876 * Driver init/exit                                                          *
   4877 *                                                                           *
   4878\*****************************************************************************/
   4879
   4880static int __init sdhci_drv_init(void)
   4881{
   4882	pr_info(DRIVER_NAME
   4883		": Secure Digital Host Controller Interface driver\n");
   4884	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
   4885
   4886	return 0;
   4887}
   4888
   4889static void __exit sdhci_drv_exit(void)
   4890{
   4891}
   4892
   4893module_init(sdhci_drv_init);
   4894module_exit(sdhci_drv_exit);
   4895
   4896module_param(debug_quirks, uint, 0444);
   4897module_param(debug_quirks2, uint, 0444);
   4898
   4899MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
   4900MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
   4901MODULE_LICENSE("GPL");
   4902
   4903MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
   4904MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");