cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mmci.c (63037B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
      4 *
      5 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
      6 *  Copyright (C) 2010 ST-Ericsson SA
      7 */
      8#include <linux/module.h>
      9#include <linux/moduleparam.h>
     10#include <linux/init.h>
     11#include <linux/ioport.h>
     12#include <linux/device.h>
     13#include <linux/io.h>
     14#include <linux/interrupt.h>
     15#include <linux/kernel.h>
     16#include <linux/slab.h>
     17#include <linux/delay.h>
     18#include <linux/err.h>
     19#include <linux/highmem.h>
     20#include <linux/log2.h>
     21#include <linux/mmc/mmc.h>
     22#include <linux/mmc/pm.h>
     23#include <linux/mmc/host.h>
     24#include <linux/mmc/card.h>
     25#include <linux/mmc/sd.h>
     26#include <linux/mmc/slot-gpio.h>
     27#include <linux/amba/bus.h>
     28#include <linux/clk.h>
     29#include <linux/scatterlist.h>
     30#include <linux/of.h>
     31#include <linux/regulator/consumer.h>
     32#include <linux/dmaengine.h>
     33#include <linux/dma-mapping.h>
     34#include <linux/amba/mmci.h>
     35#include <linux/pm_runtime.h>
     36#include <linux/types.h>
     37#include <linux/pinctrl/consumer.h>
     38#include <linux/reset.h>
     39#include <linux/gpio/consumer.h>
     40
     41#include <asm/div64.h>
     42#include <asm/io.h>
     43
     44#include "mmci.h"
     45
     46#define DRIVER_NAME "mmci-pl18x"
     47
     48static void mmci_variant_init(struct mmci_host *host);
     49static void ux500_variant_init(struct mmci_host *host);
     50static void ux500v2_variant_init(struct mmci_host *host);
     51
     52static unsigned int fmax = 515633;
     53
     54static struct variant_data variant_arm = {
     55	.fifosize		= 16 * 4,
     56	.fifohalfsize		= 8 * 4,
     57	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
     58	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
     59	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
     60	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
     61	.datalength_bits	= 16,
     62	.datactrl_blocksz	= 11,
     63	.pwrreg_powerup		= MCI_PWR_UP,
     64	.f_max			= 100000000,
     65	.reversed_irq_handling	= true,
     66	.mmcimask1		= true,
     67	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
     68	.start_err		= MCI_STARTBITERR,
     69	.opendrain		= MCI_ROD,
     70	.init			= mmci_variant_init,
     71};
     72
     73static struct variant_data variant_arm_extended_fifo = {
     74	.fifosize		= 128 * 4,
     75	.fifohalfsize		= 64 * 4,
     76	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
     77	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
     78	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
     79	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
     80	.datalength_bits	= 16,
     81	.datactrl_blocksz	= 11,
     82	.pwrreg_powerup		= MCI_PWR_UP,
     83	.f_max			= 100000000,
     84	.mmcimask1		= true,
     85	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
     86	.start_err		= MCI_STARTBITERR,
     87	.opendrain		= MCI_ROD,
     88	.init			= mmci_variant_init,
     89};
     90
     91static struct variant_data variant_arm_extended_fifo_hwfc = {
     92	.fifosize		= 128 * 4,
     93	.fifohalfsize		= 64 * 4,
     94	.clkreg_enable		= MCI_ARM_HWFCEN,
     95	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
     96	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
     97	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
     98	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
     99	.datalength_bits	= 16,
    100	.datactrl_blocksz	= 11,
    101	.pwrreg_powerup		= MCI_PWR_UP,
    102	.f_max			= 100000000,
    103	.mmcimask1		= true,
    104	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
    105	.start_err		= MCI_STARTBITERR,
    106	.opendrain		= MCI_ROD,
    107	.init			= mmci_variant_init,
    108};
    109
    110static struct variant_data variant_u300 = {
    111	.fifosize		= 16 * 4,
    112	.fifohalfsize		= 8 * 4,
    113	.clkreg_enable		= MCI_ST_U300_HWFCEN,
    114	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
    115	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
    116	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
    117	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
    118	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
    119	.datalength_bits	= 16,
    120	.datactrl_blocksz	= 11,
    121	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
    122	.st_sdio			= true,
    123	.pwrreg_powerup		= MCI_PWR_ON,
    124	.f_max			= 100000000,
    125	.signal_direction	= true,
    126	.pwrreg_clkgate		= true,
    127	.pwrreg_nopower		= true,
    128	.mmcimask1		= true,
    129	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
    130	.start_err		= MCI_STARTBITERR,
    131	.opendrain		= MCI_OD,
    132	.init			= mmci_variant_init,
    133};
    134
    135static struct variant_data variant_nomadik = {
    136	.fifosize		= 16 * 4,
    137	.fifohalfsize		= 8 * 4,
    138	.clkreg			= MCI_CLK_ENABLE,
    139	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
    140	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
    141	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
    142	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
    143	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
    144	.datalength_bits	= 24,
    145	.datactrl_blocksz	= 11,
    146	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
    147	.st_sdio		= true,
    148	.st_clkdiv		= true,
    149	.pwrreg_powerup		= MCI_PWR_ON,
    150	.f_max			= 100000000,
    151	.signal_direction	= true,
    152	.pwrreg_clkgate		= true,
    153	.pwrreg_nopower		= true,
    154	.mmcimask1		= true,
    155	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
    156	.start_err		= MCI_STARTBITERR,
    157	.opendrain		= MCI_OD,
    158	.init			= mmci_variant_init,
    159};
    160
    161static struct variant_data variant_ux500 = {
    162	.fifosize		= 30 * 4,
    163	.fifohalfsize		= 8 * 4,
    164	.clkreg			= MCI_CLK_ENABLE,
    165	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
    166	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
    167	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
    168	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
    169	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
    170	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
    171	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
    172	.datalength_bits	= 24,
    173	.datactrl_blocksz	= 11,
    174	.datactrl_any_blocksz	= true,
    175	.dma_power_of_2		= true,
    176	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
    177	.st_sdio		= true,
    178	.st_clkdiv		= true,
    179	.pwrreg_powerup		= MCI_PWR_ON,
    180	.f_max			= 100000000,
    181	.signal_direction	= true,
    182	.pwrreg_clkgate		= true,
    183	.busy_detect		= true,
    184	.busy_dpsm_flag		= MCI_DPSM_ST_BUSYMODE,
    185	.busy_detect_flag	= MCI_ST_CARDBUSY,
    186	.busy_detect_mask	= MCI_ST_BUSYENDMASK,
    187	.pwrreg_nopower		= true,
    188	.mmcimask1		= true,
    189	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
    190	.start_err		= MCI_STARTBITERR,
    191	.opendrain		= MCI_OD,
    192	.init			= ux500_variant_init,
    193};
    194
    195static struct variant_data variant_ux500v2 = {
    196	.fifosize		= 30 * 4,
    197	.fifohalfsize		= 8 * 4,
    198	.clkreg			= MCI_CLK_ENABLE,
    199	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
    200	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
    201	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
    202	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
    203	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
    204	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
    205	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
    206	.datactrl_mask_ddrmode	= MCI_DPSM_ST_DDRMODE,
    207	.datalength_bits	= 24,
    208	.datactrl_blocksz	= 11,
    209	.datactrl_any_blocksz	= true,
    210	.dma_power_of_2		= true,
    211	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
    212	.st_sdio		= true,
    213	.st_clkdiv		= true,
    214	.pwrreg_powerup		= MCI_PWR_ON,
    215	.f_max			= 100000000,
    216	.signal_direction	= true,
    217	.pwrreg_clkgate		= true,
    218	.busy_detect		= true,
    219	.busy_dpsm_flag		= MCI_DPSM_ST_BUSYMODE,
    220	.busy_detect_flag	= MCI_ST_CARDBUSY,
    221	.busy_detect_mask	= MCI_ST_BUSYENDMASK,
    222	.pwrreg_nopower		= true,
    223	.mmcimask1		= true,
    224	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
    225	.start_err		= MCI_STARTBITERR,
    226	.opendrain		= MCI_OD,
    227	.init			= ux500v2_variant_init,
    228};
    229
    230static struct variant_data variant_stm32 = {
    231	.fifosize		= 32 * 4,
    232	.fifohalfsize		= 8 * 4,
    233	.clkreg			= MCI_CLK_ENABLE,
    234	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
    235	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
    236	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
    237	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
    238	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
    239	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
    240	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
    241	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
    242	.datalength_bits	= 24,
    243	.datactrl_blocksz	= 11,
    244	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
    245	.st_sdio		= true,
    246	.st_clkdiv		= true,
    247	.pwrreg_powerup		= MCI_PWR_ON,
    248	.f_max			= 48000000,
    249	.pwrreg_clkgate		= true,
    250	.pwrreg_nopower		= true,
    251	.init			= mmci_variant_init,
    252};
    253
    254static struct variant_data variant_stm32_sdmmc = {
    255	.fifosize		= 16 * 4,
    256	.fifohalfsize		= 8 * 4,
    257	.f_max			= 208000000,
    258	.stm32_clkdiv		= true,
    259	.cmdreg_cpsm_enable	= MCI_CPSM_STM32_ENABLE,
    260	.cmdreg_lrsp_crc	= MCI_CPSM_STM32_LRSP_CRC,
    261	.cmdreg_srsp_crc	= MCI_CPSM_STM32_SRSP_CRC,
    262	.cmdreg_srsp		= MCI_CPSM_STM32_SRSP,
    263	.cmdreg_stop		= MCI_CPSM_STM32_CMDSTOP,
    264	.data_cmd_enable	= MCI_CPSM_STM32_CMDTRANS,
    265	.irq_pio_mask		= MCI_IRQ_PIO_STM32_MASK,
    266	.datactrl_first		= true,
    267	.datacnt_useless	= true,
    268	.datalength_bits	= 25,
    269	.datactrl_blocksz	= 14,
    270	.datactrl_any_blocksz	= true,
    271	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
    272	.stm32_idmabsize_mask	= GENMASK(12, 5),
    273	.busy_timeout		= true,
    274	.busy_detect		= true,
    275	.busy_detect_flag	= MCI_STM32_BUSYD0,
    276	.busy_detect_mask	= MCI_STM32_BUSYD0ENDMASK,
    277	.init			= sdmmc_variant_init,
    278};
    279
    280static struct variant_data variant_stm32_sdmmcv2 = {
    281	.fifosize		= 16 * 4,
    282	.fifohalfsize		= 8 * 4,
    283	.f_max			= 267000000,
    284	.stm32_clkdiv		= true,
    285	.cmdreg_cpsm_enable	= MCI_CPSM_STM32_ENABLE,
    286	.cmdreg_lrsp_crc	= MCI_CPSM_STM32_LRSP_CRC,
    287	.cmdreg_srsp_crc	= MCI_CPSM_STM32_SRSP_CRC,
    288	.cmdreg_srsp		= MCI_CPSM_STM32_SRSP,
    289	.cmdreg_stop		= MCI_CPSM_STM32_CMDSTOP,
    290	.data_cmd_enable	= MCI_CPSM_STM32_CMDTRANS,
    291	.irq_pio_mask		= MCI_IRQ_PIO_STM32_MASK,
    292	.datactrl_first		= true,
    293	.datacnt_useless	= true,
    294	.datalength_bits	= 25,
    295	.datactrl_blocksz	= 14,
    296	.datactrl_any_blocksz	= true,
    297	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
    298	.stm32_idmabsize_mask	= GENMASK(16, 5),
    299	.dma_lli		= true,
    300	.busy_timeout		= true,
    301	.busy_detect		= true,
    302	.busy_detect_flag	= MCI_STM32_BUSYD0,
    303	.busy_detect_mask	= MCI_STM32_BUSYD0ENDMASK,
    304	.init			= sdmmc_variant_init,
    305};
    306
    307static struct variant_data variant_qcom = {
    308	.fifosize		= 16 * 4,
    309	.fifohalfsize		= 8 * 4,
    310	.clkreg			= MCI_CLK_ENABLE,
    311	.clkreg_enable		= MCI_QCOM_CLK_FLOWENA |
    312				  MCI_QCOM_CLK_SELECT_IN_FBCLK,
    313	.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
    314	.datactrl_mask_ddrmode	= MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
    315	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
    316	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
    317	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
    318	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
    319	.data_cmd_enable	= MCI_CPSM_QCOM_DATCMD,
    320	.datalength_bits	= 24,
    321	.datactrl_blocksz	= 11,
    322	.datactrl_any_blocksz	= true,
    323	.pwrreg_powerup		= MCI_PWR_UP,
    324	.f_max			= 208000000,
    325	.explicit_mclk_control	= true,
    326	.qcom_fifo		= true,
    327	.qcom_dml		= true,
    328	.mmcimask1		= true,
    329	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
    330	.start_err		= MCI_STARTBITERR,
    331	.opendrain		= MCI_ROD,
    332	.init			= qcom_variant_init,
    333};
    334
    335/* Busy detection for the ST Micro variant */
    336static int mmci_card_busy(struct mmc_host *mmc)
    337{
    338	struct mmci_host *host = mmc_priv(mmc);
    339	unsigned long flags;
    340	int busy = 0;
    341
    342	spin_lock_irqsave(&host->lock, flags);
    343	if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
    344		busy = 1;
    345	spin_unlock_irqrestore(&host->lock, flags);
    346
    347	return busy;
    348}
    349
    350static void mmci_reg_delay(struct mmci_host *host)
    351{
    352	/*
    353	 * According to the spec, at least three feedback clock cycles
    354	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
    355	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
    356	 * Worst delay time during card init is at 100 kHz => 30 us.
    357	 * Worst delay time when up and running is at 25 MHz => 120 ns.
    358	 */
    359	if (host->cclk < 25000000)
    360		udelay(30);
    361	else
    362		ndelay(120);
    363}
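
The two constants above fall straight out of the three-cycle rule quoted in
the comment. A compilable user-space sketch of the arithmetic (not driver
code):

    #include <stdio.h>

    int main(void)
    {
            /* three clock periods at each boundary frequency */
            printf("100 kHz: %.0f us\n", 3.0 / 100e3 * 1e6); /* 30 us */
            printf("25 MHz:  %.0f ns\n", 3.0 / 25e6 * 1e9);  /* 120 ns */
            return 0;
    }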
    364
    365/*
    366 * This must be called with host->lock held
    367 */
    368void mmci_write_clkreg(struct mmci_host *host, u32 clk)
    369{
    370	if (host->clk_reg != clk) {
    371		host->clk_reg = clk;
    372		writel(clk, host->base + MMCICLOCK);
    373	}
    374}
    375
    376/*
    377 * This must be called with host->lock held
    378 */
    379void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
    380{
    381	if (host->pwr_reg != pwr) {
    382		host->pwr_reg = pwr;
    383		writel(pwr, host->base + MMCIPOWER);
    384	}
    385}
    386
    387/*
    388 * This must be called with host->lock held
    389 */
    390static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
    391{
    392	/* Keep busy mode in DPSM if enabled */
    393	datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;
    394
    395	if (host->datactrl_reg != datactrl) {
    396		host->datactrl_reg = datactrl;
    397		writel(datactrl, host->base + MMCIDATACTRL);
    398	}
    399}
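
All three write helpers above keep a shadow copy of the register and skip
the MMIO write when the value is unchanged, which avoids the mandatory
delays between writes (see mmci_reg_delay()). A minimal standalone sketch
of the pattern, with hypothetical names:

    struct shadowed_reg {
            unsigned int shadow;          /* last value written */
            volatile unsigned int *mmio;  /* the hardware register */
    };

    static void shadowed_write(struct shadowed_reg *r, unsigned int val)
    {
            if (r->shadow != val) {       /* skip redundant bus accesses */
                    r->shadow = val;
                    *r->mmio = val;
            }
    }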
    400
    401/*
    402 * This must be called with host->lock held
    403 */
    404static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
    405{
    406	struct variant_data *variant = host->variant;
    407	u32 clk = variant->clkreg;
    408
    409	/* Make sure cclk reflects the current calculated clock */
    410	host->cclk = 0;
    411
    412	if (desired) {
    413		if (variant->explicit_mclk_control) {
    414			host->cclk = host->mclk;
    415		} else if (desired >= host->mclk) {
    416			clk = MCI_CLK_BYPASS;
    417			if (variant->st_clkdiv)
    418				clk |= MCI_ST_UX500_NEG_EDGE;
    419			host->cclk = host->mclk;
    420		} else if (variant->st_clkdiv) {
    421			/*
    422			 * DB8500 TRM says f = mclk / (clkdiv + 2)
    423			 * => clkdiv = (mclk / f) - 2
    424			 * Round the divider up so we don't exceed the max
    425			 * frequency
    426			 */
    427			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
    428			if (clk >= 256)
    429				clk = 255;
    430			host->cclk = host->mclk / (clk + 2);
    431		} else {
    432			/*
    433			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
    434			 * => clkdiv = mclk / (2 * f) - 1
    435			 */
    436			clk = host->mclk / (2 * desired) - 1;
    437			if (clk >= 256)
    438				clk = 255;
    439			host->cclk = host->mclk / (2 * (clk + 1));
    440		}
    441
    442		clk |= variant->clkreg_enable;
    443		clk |= MCI_CLK_ENABLE;
    444		/* This hasn't proven to be worthwhile */
    445		/* clk |= MCI_CLK_PWRSAVE; */
    446	}
    447
    448	/* Set actual clock for debug */
    449	host->mmc->actual_clock = host->cclk;
    450
    451	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
    452		clk |= MCI_4BIT_BUS;
    453	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
    454		clk |= variant->clkreg_8bit_bus_enable;
    455
    456	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
    457	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
    458		clk |= variant->clkreg_neg_edge_enable;
    459
    460	mmci_write_clkreg(host, clk);
    461}
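
A worked example of the two divider formulas, assuming mclk = 100 MHz and a
desired 400 kHz card clock (compilable user-space sketch, not driver code):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int mclk = 100000000, desired = 400000, clk;

            /* ST variant: f = mclk / (clkdiv + 2) */
            clk = DIV_ROUND_UP(mclk, desired) - 2;
            if (clk >= 256)
                    clk = 255;
            printf("st:    clkdiv=%u cclk=%u\n", clk, mclk / (clk + 2));

            /* PL180: f = mclk / (2 * (clkdiv + 1)) */
            clk = mclk / (2 * desired) - 1;
            if (clk >= 256)
                    clk = 255;
            printf("pl180: clkdiv=%u cclk=%u\n", clk, mclk / (2 * (clk + 1)));
            return 0;
    }

Both print cclk=400000; rounding the ST divider up guarantees the result
never exceeds the requested frequency.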
    462
    463static void mmci_dma_release(struct mmci_host *host)
    464{
    465	if (host->ops && host->ops->dma_release)
    466		host->ops->dma_release(host);
    467
    468	host->use_dma = false;
    469}
    470
    471static void mmci_dma_setup(struct mmci_host *host)
    472{
    473	if (!host->ops || !host->ops->dma_setup)
    474		return;
    475
    476	if (host->ops->dma_setup(host))
    477		return;
    478
     479	/* initialize the pre-request cookie */
    480	host->next_cookie = 1;
    481
    482	host->use_dma = true;
    483}
    484
    485/*
    486 * Validate mmc prerequisites
    487 */
    488static int mmci_validate_data(struct mmci_host *host,
    489			      struct mmc_data *data)
    490{
    491	struct variant_data *variant = host->variant;
    492
    493	if (!data)
    494		return 0;
    495	if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
    496		dev_err(mmc_dev(host->mmc),
    497			"unsupported block size (%d bytes)\n", data->blksz);
    498		return -EINVAL;
    499	}
    500
    501	if (host->ops && host->ops->validate_data)
    502		return host->ops->validate_data(host, data);
    503
    504	return 0;
    505}
    506
    507static int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
    508{
    509	int err;
    510
    511	if (!host->ops || !host->ops->prep_data)
    512		return 0;
    513
    514	err = host->ops->prep_data(host, data, next);
    515
    516	if (next && !err)
    517		data->host_cookie = ++host->next_cookie < 0 ?
    518			1 : host->next_cookie;
    519
    520	return err;
    521}
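
A zero cookie always means "not prepared", so when the signed counter
overflows, the assigned value restarts at 1. A one-function standalone
sketch of that wrap rule:

    /* mirrors: ++host->next_cookie < 0 ? 1 : host->next_cookie */
    static int assign_cookie(int *next_cookie)
    {
            return ++*next_cookie < 0 ? 1 : *next_cookie;
    }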
    522
    523static void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
    524		      int err)
    525{
    526	if (host->ops && host->ops->unprep_data)
    527		host->ops->unprep_data(host, data, err);
    528
    529	data->host_cookie = 0;
    530}
    531
    532static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
    533{
    534	WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);
    535
    536	if (host->ops && host->ops->get_next_data)
    537		host->ops->get_next_data(host, data);
    538}
    539
    540static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
    541{
    542	struct mmc_data *data = host->data;
    543	int ret;
    544
    545	if (!host->use_dma)
    546		return -EINVAL;
    547
    548	ret = mmci_prep_data(host, data, false);
    549	if (ret)
    550		return ret;
    551
    552	if (!host->ops || !host->ops->dma_start)
    553		return -EINVAL;
    554
    555	/* Okay, go for it. */
    556	dev_vdbg(mmc_dev(host->mmc),
    557		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
    558		 data->sg_len, data->blksz, data->blocks, data->flags);
    559
    560	ret = host->ops->dma_start(host, &datactrl);
    561	if (ret)
    562		return ret;
    563
    564	/* Trigger the DMA transfer */
    565	mmci_write_datactrlreg(host, datactrl);
    566
    567	/*
    568	 * Let the MMCI say when the data is ended and it's time
     569	 * to fire the next DMA request. When that happens, MMCI will
     570	 * call mmci_data_end().
    571	 */
    572	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
    573	       host->base + MMCIMASK0);
    574	return 0;
    575}
    576
    577static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
    578{
    579	if (!host->use_dma)
    580		return;
    581
    582	if (host->ops && host->ops->dma_finalize)
    583		host->ops->dma_finalize(host, data);
    584}
    585
    586static void mmci_dma_error(struct mmci_host *host)
    587{
    588	if (!host->use_dma)
    589		return;
    590
    591	if (host->ops && host->ops->dma_error)
    592		host->ops->dma_error(host);
    593}
    594
    595static void
    596mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
    597{
    598	writel(0, host->base + MMCICOMMAND);
    599
    600	BUG_ON(host->data);
    601
    602	host->mrq = NULL;
    603	host->cmd = NULL;
    604
    605	mmc_request_done(host->mmc, mrq);
    606}
    607
    608static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
    609{
    610	void __iomem *base = host->base;
    611	struct variant_data *variant = host->variant;
    612
    613	if (host->singleirq) {
    614		unsigned int mask0 = readl(base + MMCIMASK0);
    615
    616		mask0 &= ~variant->irq_pio_mask;
    617		mask0 |= mask;
    618
    619		writel(mask0, base + MMCIMASK0);
    620	}
    621
    622	if (variant->mmcimask1)
    623		writel(mask, base + MMCIMASK1);
    624
    625	host->mask1_reg = mask;
    626}
    627
    628static void mmci_stop_data(struct mmci_host *host)
    629{
    630	mmci_write_datactrlreg(host, 0);
    631	mmci_set_mask1(host, 0);
    632	host->data = NULL;
    633}
    634
    635static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
    636{
    637	unsigned int flags = SG_MITER_ATOMIC;
    638
    639	if (data->flags & MMC_DATA_READ)
    640		flags |= SG_MITER_TO_SG;
    641	else
    642		flags |= SG_MITER_FROM_SG;
    643
    644	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
    645}
    646
    647static u32 mmci_get_dctrl_cfg(struct mmci_host *host)
    648{
    649	return MCI_DPSM_ENABLE | mmci_dctrl_blksz(host);
    650}
    651
    652static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
    653{
    654	return MCI_DPSM_ENABLE | (host->data->blksz << 16);
    655}
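
Two DATACTRL block-size encodings coexist here: the default helper
mmci_dctrl_blksz() (from mmci.h) packs log2(blksz) into the register, while
ux500v2 writes the raw block size into bits 16 and up. A compilable sketch
for blksz = 512, assuming the log2 encoding:

    #include <stdio.h>
    #include <strings.h>        /* ffs() */

    int main(void)
    {
            unsigned int blksz = 512;
            printf("log2 field: %d\n", ffs(blksz) - 1); /* 9 */
            printf("ux500v2:    0x%x\n", blksz << 16);  /* 0x2000000 */
            return 0;
    }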
    656
    657static bool ux500_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
    658{
    659	void __iomem *base = host->base;
    660
    661	/*
    662	 * Before unmasking for the busy end IRQ, confirm that the
    663	 * command was sent successfully. To keep track of having a
    664	 * command in-progress, waiting for busy signaling to end,
    665	 * store the status in host->busy_status.
    666	 *
     667	 * Note that the card may need a couple of clock cycles before
    668	 * it starts signaling busy on DAT0, hence re-read the
    669	 * MMCISTATUS register here, to allow the busy bit to be set.
    670	 * Potentially we may even need to poll the register for a
     671	 * while, to allow it to be set, but tests indicate that it
    672	 * isn't needed.
    673	 */
    674	if (!host->busy_status && !(status & err_msk) &&
    675	    (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
    676		writel(readl(base + MMCIMASK0) |
    677		       host->variant->busy_detect_mask,
    678		       base + MMCIMASK0);
    679
    680		host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
    681		return false;
    682	}
    683
    684	/*
    685	 * If there is a command in-progress that has been successfully
    686	 * sent, then bail out if busy status is set and wait for the
    687	 * busy end IRQ.
    688	 *
     689	 * Note that the HW triggers an IRQ on both edges while
    690	 * monitoring DAT0 for busy completion, but there is only one
    691	 * status bit in MMCISTATUS for the busy state. Therefore
     692	 * both the start and the end interrupts need to be cleared,
    693	 * one after the other. So, clear the busy start IRQ here.
    694	 */
    695	if (host->busy_status &&
    696	    (status & host->variant->busy_detect_flag)) {
    697		writel(host->variant->busy_detect_mask, base + MMCICLEAR);
    698		return false;
    699	}
    700
    701	/*
    702	 * If there is a command in-progress that has been successfully
    703	 * sent and the busy bit isn't set, it means we have received
    704	 * the busy end IRQ. Clear and mask the IRQ, then continue to
    705	 * process the command.
    706	 */
    707	if (host->busy_status) {
    708		writel(host->variant->busy_detect_mask, base + MMCICLEAR);
    709
    710		writel(readl(base + MMCIMASK0) &
    711		       ~host->variant->busy_detect_mask, base + MMCIMASK0);
    712		host->busy_status = 0;
    713	}
    714
    715	return true;
    716}
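
The early returns above walk one command through three phases, keyed on
host->busy_status; condensed as an illustrative sketch (not the driver's
types):

    enum busy_phase {
            BUSY_ARM,   /* cmd sent OK, DAT0 busy: unmask the busy-end IRQ */
            BUSY_START, /* busy-start edge: clear it and keep waiting */
            BUSY_END,   /* busy bit gone: clear + mask IRQ, resume command */
    };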
    717
    718/*
    719 * All the DMA operation mode stuff goes inside this ifdef.
     720 * This assumes that you have a generic DMA device interface;
     721 * no custom DMA interfaces are supported.
    722 */
    723#ifdef CONFIG_DMA_ENGINE
    724struct mmci_dmae_next {
    725	struct dma_async_tx_descriptor *desc;
    726	struct dma_chan	*chan;
    727};
    728
    729struct mmci_dmae_priv {
    730	struct dma_chan	*cur;
    731	struct dma_chan	*rx_channel;
    732	struct dma_chan	*tx_channel;
    733	struct dma_async_tx_descriptor	*desc_current;
    734	struct mmci_dmae_next next_data;
    735};
    736
    737int mmci_dmae_setup(struct mmci_host *host)
    738{
    739	const char *rxname, *txname;
    740	struct mmci_dmae_priv *dmae;
    741
    742	dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
    743	if (!dmae)
    744		return -ENOMEM;
    745
    746	host->dma_priv = dmae;
    747
    748	dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx");
    749	if (IS_ERR(dmae->rx_channel)) {
    750		int ret = PTR_ERR(dmae->rx_channel);
    751		dmae->rx_channel = NULL;
    752		return ret;
    753	}
    754
    755	dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx");
    756	if (IS_ERR(dmae->tx_channel)) {
    757		if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER)
    758			dev_warn(mmc_dev(host->mmc),
    759				 "Deferred probe for TX channel ignored\n");
    760		dmae->tx_channel = NULL;
    761	}
    762
    763	/*
    764	 * If only an RX channel is specified, the driver will
     765 * attempt to use it bidirectionally. However, if it is
     766 * specified but cannot be located, DMA will be disabled.
    767	 */
    768	if (dmae->rx_channel && !dmae->tx_channel)
    769		dmae->tx_channel = dmae->rx_channel;
    770
    771	if (dmae->rx_channel)
    772		rxname = dma_chan_name(dmae->rx_channel);
    773	else
    774		rxname = "none";
    775
    776	if (dmae->tx_channel)
    777		txname = dma_chan_name(dmae->tx_channel);
    778	else
    779		txname = "none";
    780
    781	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
    782		 rxname, txname);
    783
    784	/*
    785	 * Limit the maximum segment size in any SG entry according to
    786	 * the parameters of the DMA engine device.
    787	 */
    788	if (dmae->tx_channel) {
    789		struct device *dev = dmae->tx_channel->device->dev;
    790		unsigned int max_seg_size = dma_get_max_seg_size(dev);
    791
    792		if (max_seg_size < host->mmc->max_seg_size)
    793			host->mmc->max_seg_size = max_seg_size;
    794	}
    795	if (dmae->rx_channel) {
    796		struct device *dev = dmae->rx_channel->device->dev;
    797		unsigned int max_seg_size = dma_get_max_seg_size(dev);
    798
    799		if (max_seg_size < host->mmc->max_seg_size)
    800			host->mmc->max_seg_size = max_seg_size;
    801	}
    802
    803	if (!dmae->tx_channel || !dmae->rx_channel) {
    804		mmci_dmae_release(host);
    805		return -EINVAL;
    806	}
    807
    808	return 0;
    809}
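
Two policies above are worth noting: an RX-only configuration reuses the
same channel for TX, and the host's max_seg_size is clamped to what each
DMA engine can address. A standalone sketch with hypothetical names:

    struct chans { void *rx, *tx; };

    static void fallback_tx_to_rx(struct chans *c)
    {
            if (c->rx && !c->tx)
                    c->tx = c->rx;  /* one channel, both directions */
    }

    static unsigned int clamp_seg_size(unsigned int host_max,
                                       unsigned int dma_max)
    {
            return dma_max < host_max ? dma_max : host_max;
    }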
    810
    811/*
     812 * This is only used at setup and teardown time, so it was
     813 * historically inlined so it could be discarded.
    814 */
    815void mmci_dmae_release(struct mmci_host *host)
    816{
    817	struct mmci_dmae_priv *dmae = host->dma_priv;
    818
    819	if (dmae->rx_channel)
    820		dma_release_channel(dmae->rx_channel);
    821	if (dmae->tx_channel)
    822		dma_release_channel(dmae->tx_channel);
    823	dmae->rx_channel = dmae->tx_channel = NULL;
    824}
    825
    826static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
    827{
    828	struct mmci_dmae_priv *dmae = host->dma_priv;
    829	struct dma_chan *chan;
    830
    831	if (data->flags & MMC_DATA_READ)
    832		chan = dmae->rx_channel;
    833	else
    834		chan = dmae->tx_channel;
    835
    836	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
    837		     mmc_get_dma_dir(data));
    838}
    839
    840void mmci_dmae_error(struct mmci_host *host)
    841{
    842	struct mmci_dmae_priv *dmae = host->dma_priv;
    843
    844	if (!dma_inprogress(host))
    845		return;
    846
    847	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
    848	dmaengine_terminate_all(dmae->cur);
    849	host->dma_in_progress = false;
    850	dmae->cur = NULL;
    851	dmae->desc_current = NULL;
    852	host->data->host_cookie = 0;
    853
    854	mmci_dma_unmap(host, host->data);
    855}
    856
    857void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
    858{
    859	struct mmci_dmae_priv *dmae = host->dma_priv;
    860	u32 status;
    861	int i;
    862
    863	if (!dma_inprogress(host))
    864		return;
    865
    866	/* Wait up to 1ms for the DMA to complete */
    867	for (i = 0; ; i++) {
    868		status = readl(host->base + MMCISTATUS);
    869		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
    870			break;
    871		udelay(10);
    872	}
    873
    874	/*
    875	 * Check to see whether we still have some data left in the FIFO -
    876	 * this catches DMA controllers which are unable to monitor the
    877	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
    878	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
    879	 */
    880	if (status & MCI_RXDATAAVLBLMASK) {
    881		mmci_dma_error(host);
    882		if (!data->error)
    883			data->error = -EIO;
    884	} else if (!data->host_cookie) {
    885		mmci_dma_unmap(host, data);
    886	}
    887
    888	/*
    889	 * Use of DMA with scatter-gather is impossible.
     890	 * Give up on DMA and switch back to PIO mode.
    891	 */
    892	if (status & MCI_RXDATAAVLBLMASK) {
    893		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
    894		mmci_dma_release(host);
    895	}
    896
    897	host->dma_in_progress = false;
    898	dmae->cur = NULL;
    899	dmae->desc_current = NULL;
    900}
    901
    902/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
    903static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
    904				struct dma_chan **dma_chan,
    905				struct dma_async_tx_descriptor **dma_desc)
    906{
    907	struct mmci_dmae_priv *dmae = host->dma_priv;
    908	struct variant_data *variant = host->variant;
    909	struct dma_slave_config conf = {
    910		.src_addr = host->phybase + MMCIFIFO,
    911		.dst_addr = host->phybase + MMCIFIFO,
    912		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
    913		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
    914		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
    915		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
    916		.device_fc = false,
    917	};
    918	struct dma_chan *chan;
    919	struct dma_device *device;
    920	struct dma_async_tx_descriptor *desc;
    921	int nr_sg;
    922	unsigned long flags = DMA_CTRL_ACK;
    923
    924	if (data->flags & MMC_DATA_READ) {
    925		conf.direction = DMA_DEV_TO_MEM;
    926		chan = dmae->rx_channel;
    927	} else {
    928		conf.direction = DMA_MEM_TO_DEV;
    929		chan = dmae->tx_channel;
    930	}
    931
    932	/* If there's no DMA channel, fall back to PIO */
    933	if (!chan)
    934		return -EINVAL;
    935
    936	/* If less than or equal to the fifo size, don't bother with DMA */
    937	if (data->blksz * data->blocks <= variant->fifosize)
    938		return -EINVAL;
    939
    940	/*
    941	 * This is necessary to get SDIO working on the Ux500. We do not yet
    942	 * know if this is a bug in:
    943	 * - The Ux500 DMA controller (DMA40)
    944	 * - The MMCI DMA interface on the Ux500
     945	 * Some power-of-two block sizes (such as 64 bytes) are sent regularly
     946	 * during SDIO traffic and those work fine, so for these we enable DMA
    947	 * transfers.
    948	 */
    949	if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
    950		return -EINVAL;
    951
    952	device = chan->device;
    953	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
    954			   mmc_get_dma_dir(data));
    955	if (nr_sg == 0)
    956		return -EINVAL;
    957
    958	if (host->variant->qcom_dml)
    959		flags |= DMA_PREP_INTERRUPT;
    960
    961	dmaengine_slave_config(chan, &conf);
    962	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
    963					    conf.direction, flags);
    964	if (!desc)
    965		goto unmap_exit;
    966
    967	*dma_chan = chan;
    968	*dma_desc = desc;
    969
    970	return 0;
    971
    972 unmap_exit:
    973	dma_unmap_sg(device->dev, data->sg, data->sg_len,
    974		     mmc_get_dma_dir(data));
    975	return -ENOMEM;
    976}
    977
    978int mmci_dmae_prep_data(struct mmci_host *host,
    979			struct mmc_data *data,
    980			bool next)
    981{
    982	struct mmci_dmae_priv *dmae = host->dma_priv;
    983	struct mmci_dmae_next *nd = &dmae->next_data;
    984
    985	if (!host->use_dma)
    986		return -EINVAL;
    987
    988	if (next)
    989		return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
    990	/* Check if next job is already prepared. */
    991	if (dmae->cur && dmae->desc_current)
    992		return 0;
    993
     994	/* No job was prepared, thus do it now. */
    995	return _mmci_dmae_prep_data(host, data, &dmae->cur,
    996				    &dmae->desc_current);
    997}
    998
    999int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
   1000{
   1001	struct mmci_dmae_priv *dmae = host->dma_priv;
   1002	int ret;
   1003
   1004	host->dma_in_progress = true;
   1005	ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
   1006	if (ret < 0) {
   1007		host->dma_in_progress = false;
   1008		return ret;
   1009	}
   1010	dma_async_issue_pending(dmae->cur);
   1011
   1012	*datactrl |= MCI_DPSM_DMAENABLE;
   1013
   1014	return 0;
   1015}
   1016
   1017void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
   1018{
   1019	struct mmci_dmae_priv *dmae = host->dma_priv;
   1020	struct mmci_dmae_next *next = &dmae->next_data;
   1021
   1022	if (!host->use_dma)
   1023		return;
   1024
   1025	WARN_ON(!data->host_cookie && (next->desc || next->chan));
   1026
   1027	dmae->desc_current = next->desc;
   1028	dmae->cur = next->chan;
   1029	next->desc = NULL;
   1030	next->chan = NULL;
   1031}
   1032
   1033void mmci_dmae_unprep_data(struct mmci_host *host,
   1034			   struct mmc_data *data, int err)
   1035
   1036{
   1037	struct mmci_dmae_priv *dmae = host->dma_priv;
   1038
   1039	if (!host->use_dma)
   1040		return;
   1041
   1042	mmci_dma_unmap(host, data);
   1043
   1044	if (err) {
   1045		struct mmci_dmae_next *next = &dmae->next_data;
   1046		struct dma_chan *chan;
   1047		if (data->flags & MMC_DATA_READ)
   1048			chan = dmae->rx_channel;
   1049		else
   1050			chan = dmae->tx_channel;
   1051		dmaengine_terminate_all(chan);
   1052
   1053		if (dmae->desc_current == next->desc)
   1054			dmae->desc_current = NULL;
   1055
   1056		if (dmae->cur == next->chan) {
   1057			host->dma_in_progress = false;
   1058			dmae->cur = NULL;
   1059		}
   1060
   1061		next->desc = NULL;
   1062		next->chan = NULL;
   1063	}
   1064}
   1065
   1066static struct mmci_host_ops mmci_variant_ops = {
   1067	.prep_data = mmci_dmae_prep_data,
   1068	.unprep_data = mmci_dmae_unprep_data,
   1069	.get_datactrl_cfg = mmci_get_dctrl_cfg,
   1070	.get_next_data = mmci_dmae_get_next_data,
   1071	.dma_setup = mmci_dmae_setup,
   1072	.dma_release = mmci_dmae_release,
   1073	.dma_start = mmci_dmae_start,
   1074	.dma_finalize = mmci_dmae_finalize,
   1075	.dma_error = mmci_dmae_error,
   1076};
   1077#else
   1078static struct mmci_host_ops mmci_variant_ops = {
   1079	.get_datactrl_cfg = mmci_get_dctrl_cfg,
   1080};
   1081#endif
   1082
   1083static void mmci_variant_init(struct mmci_host *host)
   1084{
   1085	host->ops = &mmci_variant_ops;
   1086}
   1087
   1088static void ux500_variant_init(struct mmci_host *host)
   1089{
   1090	host->ops = &mmci_variant_ops;
   1091	host->ops->busy_complete = ux500_busy_complete;
   1092}
   1093
   1094static void ux500v2_variant_init(struct mmci_host *host)
   1095{
   1096	host->ops = &mmci_variant_ops;
   1097	host->ops->busy_complete = ux500_busy_complete;
   1098	host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
   1099}
   1100
   1101static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
   1102{
   1103	struct mmci_host *host = mmc_priv(mmc);
   1104	struct mmc_data *data = mrq->data;
   1105
   1106	if (!data)
   1107		return;
   1108
   1109	WARN_ON(data->host_cookie);
   1110
   1111	if (mmci_validate_data(host, data))
   1112		return;
   1113
   1114	mmci_prep_data(host, data, true);
   1115}
   1116
   1117static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
   1118			      int err)
   1119{
   1120	struct mmci_host *host = mmc_priv(mmc);
   1121	struct mmc_data *data = mrq->data;
   1122
   1123	if (!data || !data->host_cookie)
   1124		return;
   1125
   1126	mmci_unprep_data(host, data, err);
   1127}
   1128
   1129static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
   1130{
   1131	struct variant_data *variant = host->variant;
   1132	unsigned int datactrl, timeout, irqmask;
   1133	unsigned long long clks;
   1134	void __iomem *base;
   1135
   1136	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
   1137		data->blksz, data->blocks, data->flags);
   1138
   1139	host->data = data;
   1140	host->size = data->blksz * data->blocks;
   1141	data->bytes_xfered = 0;
   1142
   1143	clks = (unsigned long long)data->timeout_ns * host->cclk;
   1144	do_div(clks, NSEC_PER_SEC);
   1145
   1146	timeout = data->timeout_clks + (unsigned int)clks;
   1147
   1148	base = host->base;
   1149	writel(timeout, base + MMCIDATATIMER);
   1150	writel(host->size, base + MMCIDATALENGTH);
   1151
   1152	datactrl = host->ops->get_datactrl_cfg(host);
   1153	datactrl |= host->data->flags & MMC_DATA_READ ? MCI_DPSM_DIRECTION : 0;
   1154
   1155	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
   1156		u32 clk;
   1157
   1158		datactrl |= variant->datactrl_mask_sdio;
   1159
   1160		/*
    1161		 * For small SDIO write transfers, the ST Micro variant
    1162		 * needs clock H/W flow control disabled,
   1163		 * otherwise the transfer will not start. The threshold
   1164		 * depends on the rate of MCLK.
   1165		 */
   1166		if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
   1167		    (host->size < 8 ||
   1168		     (host->size <= 8 && host->mclk > 50000000)))
   1169			clk = host->clk_reg & ~variant->clkreg_enable;
   1170		else
   1171			clk = host->clk_reg | variant->clkreg_enable;
   1172
   1173		mmci_write_clkreg(host, clk);
   1174	}
   1175
   1176	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
   1177	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
   1178		datactrl |= variant->datactrl_mask_ddrmode;
   1179
   1180	/*
    1181	 * Attempt to use DMA operation mode; if this
    1182	 * should fail, fall back to PIO mode.
   1183	 */
   1184	if (!mmci_dma_start(host, datactrl))
   1185		return;
   1186
   1187	/* IRQ mode, map the SG list for CPU reading/writing */
   1188	mmci_init_sg(host, data);
   1189
   1190	if (data->flags & MMC_DATA_READ) {
   1191		irqmask = MCI_RXFIFOHALFFULLMASK;
   1192
   1193		/*
   1194		 * If we have less than the fifo 'half-full' threshold to
   1195		 * transfer, trigger a PIO interrupt as soon as any data
   1196		 * is available.
   1197		 */
   1198		if (host->size < variant->fifohalfsize)
   1199			irqmask |= MCI_RXDATAAVLBLMASK;
   1200	} else {
   1201		/*
   1202		 * We don't actually need to include "FIFO empty" here
    1203		 * since it's implicit in "FIFO half empty".
   1204		 */
   1205		irqmask = MCI_TXFIFOHALFEMPTYMASK;
   1206	}
   1207
   1208	mmci_write_datactrlreg(host, datactrl);
   1209	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
   1210	mmci_set_mask1(host, irqmask);
   1211}
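
The timer setup above converts the card's nanosecond timeout into
controller clocks. Assuming a 100 ms timeout at cclk = 26 MHz (compilable
sketch of the arithmetic):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long clks = 100000000ULL /* ns */ * 26000000 /* Hz */;
            clks /= 1000000000ULL;          /* do_div(clks, NSEC_PER_SEC) */
            printf("%llu clks\n", clks);    /* 2600000 */
            return 0;
    }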
   1212
   1213static void
   1214mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
   1215{
   1216	void __iomem *base = host->base;
   1217	unsigned long long clks;
   1218
   1219	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
   1220	    cmd->opcode, cmd->arg, cmd->flags);
   1221
   1222	if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
   1223		writel(0, base + MMCICOMMAND);
   1224		mmci_reg_delay(host);
   1225	}
   1226
   1227	if (host->variant->cmdreg_stop &&
   1228	    cmd->opcode == MMC_STOP_TRANSMISSION)
   1229		c |= host->variant->cmdreg_stop;
   1230
   1231	c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
   1232	if (cmd->flags & MMC_RSP_PRESENT) {
   1233		if (cmd->flags & MMC_RSP_136)
   1234			c |= host->variant->cmdreg_lrsp_crc;
   1235		else if (cmd->flags & MMC_RSP_CRC)
   1236			c |= host->variant->cmdreg_srsp_crc;
   1237		else
   1238			c |= host->variant->cmdreg_srsp;
   1239	}
   1240
   1241	if (host->variant->busy_timeout && cmd->flags & MMC_RSP_BUSY) {
   1242		if (!cmd->busy_timeout)
   1243			cmd->busy_timeout = 10 * MSEC_PER_SEC;
   1244
   1245		if (cmd->busy_timeout > host->mmc->max_busy_timeout)
   1246			clks = (unsigned long long)host->mmc->max_busy_timeout * host->cclk;
   1247		else
   1248			clks = (unsigned long long)cmd->busy_timeout * host->cclk;
   1249
   1250		do_div(clks, MSEC_PER_SEC);
   1251		writel_relaxed(clks, host->base + MMCIDATATIMER);
   1252	}
   1253
   1254	if (host->ops->pre_sig_volt_switch && cmd->opcode == SD_SWITCH_VOLTAGE)
   1255		host->ops->pre_sig_volt_switch(host);
   1256
   1257	if (/*interrupt*/0)
   1258		c |= MCI_CPSM_INTERRUPT;
   1259
   1260	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
   1261		c |= host->variant->data_cmd_enable;
   1262
   1263	host->cmd = cmd;
   1264
   1265	writel(cmd->arg, base + MMCIARGUMENT);
   1266	writel(c, base + MMCICOMMAND);
   1267}
   1268
   1269static void mmci_stop_command(struct mmci_host *host)
   1270{
   1271	host->stop_abort.error = 0;
   1272	mmci_start_command(host, &host->stop_abort, 0);
   1273}
   1274
   1275static void
   1276mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
   1277	      unsigned int status)
   1278{
   1279	unsigned int status_err;
   1280
   1281	/* Make sure we have data to handle */
   1282	if (!data)
   1283		return;
   1284
   1285	/* First check for errors */
   1286	status_err = status & (host->variant->start_err |
   1287			       MCI_DATACRCFAIL | MCI_DATATIMEOUT |
   1288			       MCI_TXUNDERRUN | MCI_RXOVERRUN);
   1289
   1290	if (status_err) {
   1291		u32 remain, success;
   1292
   1293		/* Terminate the DMA transfer */
   1294		mmci_dma_error(host);
   1295
   1296		/*
   1297		 * Calculate how far we are into the transfer.  Note that
   1298		 * the data counter gives the number of bytes transferred
   1299		 * on the MMC bus, not on the host side.  On reads, this
   1300		 * can be as much as a FIFO-worth of data ahead.  This
   1301		 * matters for FIFO overruns only.
   1302		 */
   1303		if (!host->variant->datacnt_useless) {
   1304			remain = readl(host->base + MMCIDATACNT);
   1305			success = data->blksz * data->blocks - remain;
   1306		} else {
   1307			success = 0;
   1308		}
   1309
   1310		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
   1311			status_err, success);
   1312		if (status_err & MCI_DATACRCFAIL) {
   1313			/* Last block was not successful */
   1314			success -= 1;
   1315			data->error = -EILSEQ;
   1316		} else if (status_err & MCI_DATATIMEOUT) {
   1317			data->error = -ETIMEDOUT;
   1318		} else if (status_err & MCI_STARTBITERR) {
   1319			data->error = -ECOMM;
   1320		} else if (status_err & MCI_TXUNDERRUN) {
   1321			data->error = -EIO;
   1322		} else if (status_err & MCI_RXOVERRUN) {
   1323			if (success > host->variant->fifosize)
   1324				success -= host->variant->fifosize;
   1325			else
   1326				success = 0;
   1327			data->error = -EIO;
   1328		}
   1329		data->bytes_xfered = round_down(success, data->blksz);
   1330	}
   1331
   1332	if (status & MCI_DATABLOCKEND)
   1333		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
   1334
   1335	if (status & MCI_DATAEND || data->error) {
   1336		mmci_dma_finalize(host, data);
   1337
   1338		mmci_stop_data(host);
   1339
   1340		if (!data->error)
   1341			/* The error clause is handled above, success! */
   1342			data->bytes_xfered = data->blksz * data->blocks;
   1343
   1344		if (!data->stop) {
   1345			if (host->variant->cmdreg_stop && data->error)
   1346				mmci_stop_command(host);
   1347			else
   1348				mmci_request_end(host, data->mrq);
   1349		} else if (host->mrq->sbc && !data->error) {
   1350			mmci_request_end(host, data->mrq);
   1351		} else {
   1352			mmci_start_command(host, data->stop, 0);
   1353		}
   1354	}
   1355}
   1356
   1357static void
   1358mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
   1359	     unsigned int status)
   1360{
   1361	u32 err_msk = MCI_CMDCRCFAIL | MCI_CMDTIMEOUT;
   1362	void __iomem *base = host->base;
   1363	bool sbc, busy_resp;
   1364
   1365	if (!cmd)
   1366		return;
   1367
   1368	sbc = (cmd == host->mrq->sbc);
   1369	busy_resp = !!(cmd->flags & MMC_RSP_BUSY);
   1370
   1371	/*
    1372	 * The status must include one of these interrupts to be considered
    1373	 * worth handling. Note that we tag on any latent IRQs postponed
   1374	 * due to waiting for busy status.
   1375	 */
   1376	if (host->variant->busy_timeout && busy_resp)
   1377		err_msk |= MCI_DATATIMEOUT;
   1378
   1379	if (!((status | host->busy_status) &
   1380	      (err_msk | MCI_CMDSENT | MCI_CMDRESPEND)))
   1381		return;
   1382
   1383	/* Handle busy detection on DAT0 if the variant supports it. */
   1384	if (busy_resp && host->variant->busy_detect)
   1385		if (!host->ops->busy_complete(host, status, err_msk))
   1386			return;
   1387
   1388	host->cmd = NULL;
   1389
   1390	if (status & MCI_CMDTIMEOUT) {
   1391		cmd->error = -ETIMEDOUT;
   1392	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
   1393		cmd->error = -EILSEQ;
   1394	} else if (host->variant->busy_timeout && busy_resp &&
   1395		   status & MCI_DATATIMEOUT) {
   1396		cmd->error = -ETIMEDOUT;
   1397		/*
   1398		 * This will wake up mmci_irq_thread() which will issue
   1399		 * a hardware reset of the MMCI block.
   1400		 */
   1401		host->irq_action = IRQ_WAKE_THREAD;
   1402	} else {
   1403		cmd->resp[0] = readl(base + MMCIRESPONSE0);
   1404		cmd->resp[1] = readl(base + MMCIRESPONSE1);
   1405		cmd->resp[2] = readl(base + MMCIRESPONSE2);
   1406		cmd->resp[3] = readl(base + MMCIRESPONSE3);
   1407	}
   1408
   1409	if ((!sbc && !cmd->data) || cmd->error) {
   1410		if (host->data) {
   1411			/* Terminate the DMA transfer */
   1412			mmci_dma_error(host);
   1413
   1414			mmci_stop_data(host);
   1415			if (host->variant->cmdreg_stop && cmd->error) {
   1416				mmci_stop_command(host);
   1417				return;
   1418			}
   1419		}
   1420
   1421		if (host->irq_action != IRQ_WAKE_THREAD)
   1422			mmci_request_end(host, host->mrq);
   1423
   1424	} else if (sbc) {
   1425		mmci_start_command(host, host->mrq->cmd, 0);
   1426	} else if (!host->variant->datactrl_first &&
   1427		   !(cmd->data->flags & MMC_DATA_READ)) {
   1428		mmci_start_data(host, cmd->data);
   1429	}
   1430}
   1431
   1432static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
   1433{
   1434	return remain - (readl(host->base + MMCIFIFOCNT) << 2);
   1435}
   1436
   1437static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
   1438{
   1439	/*
    1440	 * On Qcom SDCC4 only 8 words are used in each burst, so only 8 addresses
    1441	 * from the FIFO range should be used.
   1442	 */
   1443	if (status & MCI_RXFIFOHALFFULL)
   1444		return host->variant->fifohalfsize;
   1445	else if (status & MCI_RXDATAAVLBL)
   1446		return 4;
   1447
   1448	return 0;
   1449}
   1450
   1451static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
   1452{
   1453	void __iomem *base = host->base;
   1454	char *ptr = buffer;
   1455	u32 status = readl(host->base + MMCISTATUS);
   1456	int host_remain = host->size;
   1457
   1458	do {
   1459		int count = host->get_rx_fifocnt(host, status, host_remain);
   1460
   1461		if (count > remain)
   1462			count = remain;
   1463
   1464		if (count <= 0)
   1465			break;
   1466
   1467		/*
   1468		 * SDIO especially may want to send something that is
   1469		 * not divisible by 4 (as opposed to card sectors
   1470		 * etc). Therefore make sure to always read the last bytes
   1471		 * while only doing full 32-bit reads towards the FIFO.
   1472		 */
   1473		if (unlikely(count & 0x3)) {
   1474			if (count < 4) {
   1475				unsigned char buf[4];
   1476				ioread32_rep(base + MMCIFIFO, buf, 1);
   1477				memcpy(ptr, buf, count);
   1478			} else {
   1479				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
   1480				count &= ~0x3;
   1481			}
   1482		} else {
   1483			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
   1484		}
   1485
   1486		ptr += count;
   1487		remain -= count;
   1488		host_remain -= count;
   1489
   1490		if (remain == 0)
   1491			break;
   1492
   1493		status = readl(base + MMCISTATUS);
   1494	} while (status & MCI_RXDATAAVLBL);
   1495
   1496	return ptr - buffer;
   1497}
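
A worked trace of the unaligned tail handling above, for a hypothetical
6-byte SDIO transfer:

    pass 1: count = 6; 6 & 3 != 0 and 6 >= 4, so ioread32_rep() moves
            6 >> 2 = 1 full word and count is rounded down to 4;
    pass 2: count = 2 (< 4), so one whole word is read into the 4-byte
            bounce buffer and only the remaining 2 bytes are memcpy'd out.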
   1498
   1499static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
   1500{
   1501	struct variant_data *variant = host->variant;
   1502	void __iomem *base = host->base;
   1503	char *ptr = buffer;
   1504
   1505	do {
   1506		unsigned int count, maxcnt;
   1507
   1508		maxcnt = status & MCI_TXFIFOEMPTY ?
   1509			 variant->fifosize : variant->fifohalfsize;
   1510		count = min(remain, maxcnt);
   1511
   1512		/*
   1513		 * SDIO especially may want to send something that is
   1514		 * not divisible by 4 (as opposed to card sectors
    1515		 * etc), and the FIFO only accepts full 32-bit writes.
    1516		 * So compensate by adding +3 to the count: a single
    1517		 * byte becomes a 32-bit write, 7 bytes will be two
    1518		 * 32-bit writes, etc.
   1519		 */
   1520		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);
   1521
   1522		ptr += count;
   1523		remain -= count;
   1524
   1525		if (remain == 0)
   1526			break;
   1527
   1528		status = readl(base + MMCISTATUS);
   1529	} while (status & MCI_TXFIFOHALFEMPTY);
   1530
   1531	return ptr - buffer;
   1532}
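
A compilable check of the rounding above: (count + 3) >> 2 turns a byte
count into whole 32-bit FIFO writes.

    #include <stdio.h>

    int main(void)
    {
            for (unsigned int count = 1; count <= 8; count++)
                    printf("%u bytes -> %u words\n", count, (count + 3) >> 2);
            return 0;       /* 1..4 -> 1 word, 5..8 -> 2 words */
    }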
   1533
   1534/*
   1535 * PIO data transfer IRQ handler.
   1536 */
   1537static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
   1538{
   1539	struct mmci_host *host = dev_id;
   1540	struct sg_mapping_iter *sg_miter = &host->sg_miter;
   1541	struct variant_data *variant = host->variant;
   1542	void __iomem *base = host->base;
   1543	u32 status;
   1544
   1545	status = readl(base + MMCISTATUS);
   1546
   1547	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
   1548
   1549	do {
   1550		unsigned int remain, len;
   1551		char *buffer;
   1552
   1553		/*
   1554		 * For write, we only need to test the half-empty flag
   1555		 * here - if the FIFO is completely empty, then by
   1556		 * definition it is more than half empty.
   1557		 *
   1558		 * For read, check for data available.
   1559		 */
   1560		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
   1561			break;
   1562
   1563		if (!sg_miter_next(sg_miter))
   1564			break;
   1565
   1566		buffer = sg_miter->addr;
   1567		remain = sg_miter->length;
   1568
   1569		len = 0;
   1570		if (status & MCI_RXACTIVE)
   1571			len = mmci_pio_read(host, buffer, remain);
   1572		if (status & MCI_TXACTIVE)
   1573			len = mmci_pio_write(host, buffer, remain, status);
   1574
   1575		sg_miter->consumed = len;
   1576
   1577		host->size -= len;
   1578		remain -= len;
   1579
   1580		if (remain)
   1581			break;
   1582
   1583		status = readl(base + MMCISTATUS);
   1584	} while (1);
   1585
   1586	sg_miter_stop(sg_miter);
   1587
   1588	/*
   1589	 * If we have less than the fifo 'half-full' threshold to transfer,
   1590	 * trigger a PIO interrupt as soon as any data is available.
   1591	 */
   1592	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
   1593		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
   1594
   1595	/*
   1596	 * If we run out of data, disable the data IRQs; this
   1597	 * prevents a race where the FIFO becomes empty before
   1598	 * the chip itself has disabled the data path, and
   1599	 * stops us racing with our data end IRQ.
   1600	 */
   1601	if (host->size == 0) {
   1602		mmci_set_mask1(host, 0);
   1603		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
   1604	}
   1605
   1606	return IRQ_HANDLED;
   1607}
   1608
   1609/*
   1610 * Handle completion of command and data transfers.
   1611 */
   1612static irqreturn_t mmci_irq(int irq, void *dev_id)
   1613{
   1614	struct mmci_host *host = dev_id;
   1615	u32 status;
   1616
   1617	spin_lock(&host->lock);
   1618	host->irq_action = IRQ_HANDLED;
   1619
   1620	do {
   1621		status = readl(host->base + MMCISTATUS);
   1622		if (!status)
   1623			break;
   1624
   1625		if (host->singleirq) {
   1626			if (status & host->mask1_reg)
   1627				mmci_pio_irq(irq, dev_id);
   1628
   1629			status &= ~host->variant->irq_pio_mask;
   1630		}
   1631
   1632		/*
    1633		 * Busy detection is managed by mmci_cmd_irq(), including
    1634		 * clearing the corresponding IRQ.
   1635		 */
   1636		status &= readl(host->base + MMCIMASK0);
   1637		if (host->variant->busy_detect)
   1638			writel(status & ~host->variant->busy_detect_mask,
   1639			       host->base + MMCICLEAR);
   1640		else
   1641			writel(status, host->base + MMCICLEAR);
   1642
   1643		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
   1644
   1645		if (host->variant->reversed_irq_handling) {
   1646			mmci_data_irq(host, host->data, status);
   1647			mmci_cmd_irq(host, host->cmd, status);
   1648		} else {
   1649			mmci_cmd_irq(host, host->cmd, status);
   1650			mmci_data_irq(host, host->data, status);
   1651		}
   1652
   1653		/*
   1654		 * Busy detection has been handled by mmci_cmd_irq() above.
   1655		 * Clear the status bit to prevent polling in IRQ context.
   1656		 */
   1657		if (host->variant->busy_detect_flag)
   1658			status &= ~host->variant->busy_detect_flag;
   1659
   1660	} while (status);
   1661
   1662	spin_unlock(&host->lock);
   1663
   1664	return host->irq_action;
   1665}
   1666
   1667/*
   1668 * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW.
   1669 *
    1670 * A reset is needed for some variants, where a data timeout for an R1B request
   1671 * causes the DPSM to stay busy (non-functional).
   1672 */
   1673static irqreturn_t mmci_irq_thread(int irq, void *dev_id)
   1674{
   1675	struct mmci_host *host = dev_id;
   1676	unsigned long flags;
   1677
   1678	if (host->rst) {
   1679		reset_control_assert(host->rst);
   1680		udelay(2);
   1681		reset_control_deassert(host->rst);
   1682	}
   1683
   1684	spin_lock_irqsave(&host->lock, flags);
   1685	writel(host->clk_reg, host->base + MMCICLOCK);
   1686	writel(host->pwr_reg, host->base + MMCIPOWER);
   1687	writel(MCI_IRQENABLE | host->variant->start_err,
   1688	       host->base + MMCIMASK0);
   1689
   1690	host->irq_action = IRQ_HANDLED;
   1691	mmci_request_end(host, host->mrq);
   1692	spin_unlock_irqrestore(&host->lock, flags);
   1693
   1694	return host->irq_action;
   1695}
   1696
   1697static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
   1698{
   1699	struct mmci_host *host = mmc_priv(mmc);
   1700	unsigned long flags;
   1701
   1702	WARN_ON(host->mrq != NULL);
   1703
   1704	mrq->cmd->error = mmci_validate_data(host, mrq->data);
   1705	if (mrq->cmd->error) {
   1706		mmc_request_done(mmc, mrq);
   1707		return;
   1708	}
   1709
   1710	spin_lock_irqsave(&host->lock, flags);
   1711
   1712	host->mrq = mrq;
   1713
   1714	if (mrq->data)
   1715		mmci_get_next_data(host, mrq->data);
   1716
   1717	if (mrq->data &&
   1718	    (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
   1719		mmci_start_data(host, mrq->data);
   1720
   1721	if (mrq->sbc)
   1722		mmci_start_command(host, mrq->sbc, 0);
   1723	else
   1724		mmci_start_command(host, mrq->cmd, 0);
   1725
   1726	spin_unlock_irqrestore(&host->lock, flags);
   1727}
   1728
   1729static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
   1730{
   1731	struct mmci_host *host = mmc_priv(mmc);
   1732	u32 max_busy_timeout = 0;
   1733
   1734	if (!host->variant->busy_detect)
   1735		return;
   1736
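        	/*
        	 * The core expects max_busy_timeout in ms while the HW counts
        	 * clock cycles, hence the ~0UL cycle cap is converted using the
        	 * cycle rate per ms. Illustrative example, assuming a 32-bit
        	 * unsigned long: at 100 MHz, ~0UL / (100000000 / 1000) is
        	 * roughly 42949 ms.
        	 */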
   1737	if (host->variant->busy_timeout && mmc->actual_clock)
   1738		max_busy_timeout = ~0UL / (mmc->actual_clock / MSEC_PER_SEC);
   1739
   1740	mmc->max_busy_timeout = max_busy_timeout;
   1741}
   1742
   1743static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
   1744{
   1745	struct mmci_host *host = mmc_priv(mmc);
   1746	struct variant_data *variant = host->variant;
   1747	u32 pwr = 0;
   1748	unsigned long flags;
   1749	int ret;
   1750
   1751	switch (ios->power_mode) {
   1752	case MMC_POWER_OFF:
   1753		if (!IS_ERR(mmc->supply.vmmc))
   1754			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
   1755
   1756		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
   1757			regulator_disable(mmc->supply.vqmmc);
   1758			host->vqmmc_enabled = false;
   1759		}
   1760
   1761		break;
   1762	case MMC_POWER_UP:
   1763		if (!IS_ERR(mmc->supply.vmmc))
   1764			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
   1765
   1766		/*
    1767		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
    1768		 * and instead uses MCI_PWR_ON, so apply whatever value is
    1769		 * configured in the variant data.
   1770		 */
   1771		pwr |= variant->pwrreg_powerup;
   1772
   1773		break;
   1774	case MMC_POWER_ON:
   1775		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
   1776			ret = regulator_enable(mmc->supply.vqmmc);
   1777			if (ret < 0)
   1778				dev_err(mmc_dev(mmc),
   1779					"failed to enable vqmmc regulator\n");
   1780			else
   1781				host->vqmmc_enabled = true;
   1782		}
   1783
   1784		pwr |= MCI_PWR_ON;
   1785		break;
   1786	}
   1787
   1788	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
   1789		/*
   1790		 * The ST Micro variant has some additional bits
   1791		 * indicating signal direction for the signals in
   1792		 * the SD/MMC bus and feedback-clock usage.
   1793		 */
   1794		pwr |= host->pwr_reg_add;
   1795
   1796		if (ios->bus_width == MMC_BUS_WIDTH_4)
   1797			pwr &= ~MCI_ST_DATA74DIREN;
   1798		else if (ios->bus_width == MMC_BUS_WIDTH_1)
   1799			pwr &= (~MCI_ST_DATA74DIREN &
   1800				~MCI_ST_DATA31DIREN &
   1801				~MCI_ST_DATA2DIREN);
   1802	}
   1803
   1804	if (variant->opendrain) {
   1805		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
   1806			pwr |= variant->opendrain;
   1807	} else {
   1808		/*
    1809		 * If the variant cannot configure the pads on its own, we
    1810		 * expect pinctrl to be able to do that for us.
   1811		 */
   1812		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
   1813			pinctrl_select_state(host->pinctrl, host->pins_opendrain);
   1814		else
   1815			pinctrl_select_default_state(mmc_dev(mmc));
   1816	}
   1817
   1818	/*
   1819	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
   1820	 * gating the clock, the MCI_PWR_ON bit is cleared.
   1821	 */
   1822	if (!ios->clock && variant->pwrreg_clkgate)
   1823		pwr &= ~MCI_PWR_ON;
   1824
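        	/*
        	 * With explicit_mclk_control (e.g. the Qualcomm variant), the
        	 * card clock is set through the clock framework rather than the
        	 * internal divider, so only reprogram the rate when the
        	 * requested clock has actually changed.
        	 */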
   1825	if (host->variant->explicit_mclk_control &&
   1826	    ios->clock != host->clock_cache) {
   1827		ret = clk_set_rate(host->clk, ios->clock);
   1828		if (ret < 0)
   1829			dev_err(mmc_dev(host->mmc),
   1830				"Error setting clock rate (%d)\n", ret);
   1831		else
   1832			host->mclk = clk_get_rate(host->clk);
   1833	}
   1834	host->clock_cache = ios->clock;
   1835
   1836	spin_lock_irqsave(&host->lock, flags);
   1837
   1838	if (host->ops && host->ops->set_clkreg)
   1839		host->ops->set_clkreg(host, ios->clock);
   1840	else
   1841		mmci_set_clkreg(host, ios->clock);
   1842
   1843	mmci_set_max_busy_timeout(mmc);
   1844
   1845	if (host->ops && host->ops->set_pwrreg)
   1846		host->ops->set_pwrreg(host, pwr);
   1847	else
   1848		mmci_write_pwrreg(host, pwr);
   1849
   1850	mmci_reg_delay(host);
   1851
   1852	spin_unlock_irqrestore(&host->lock, flags);
   1853}
   1854
   1855static int mmci_get_cd(struct mmc_host *mmc)
   1856{
   1857	struct mmci_host *host = mmc_priv(mmc);
   1858	struct mmci_platform_data *plat = host->plat;
   1859	unsigned int status = mmc_gpio_get_cd(mmc);
   1860
   1861	if (status == -ENOSYS) {
   1862		if (!plat->status)
   1863			return 1; /* Assume always present */
   1864
   1865		status = plat->status(mmc_dev(host->mmc));
   1866	}
   1867	return status;
   1868}
   1869
   1870static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
   1871{
   1872	struct mmci_host *host = mmc_priv(mmc);
   1873	int ret;
   1874
   1875	ret = mmc_regulator_set_vqmmc(mmc, ios);
   1876
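        	/*
        	 * Note that an error from mmc_regulator_set_vqmmc() is treated
        	 * as non-fatal below (ret is reset to 0); only the variant's
        	 * post-switch hook can fail the voltage switch.
        	 */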
   1877	if (!ret && host->ops && host->ops->post_sig_volt_switch)
   1878		ret = host->ops->post_sig_volt_switch(host, ios);
   1879	else if (ret)
   1880		ret = 0;
   1881
   1882	if (ret < 0)
   1883		dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
   1884
   1885	return ret;
   1886}
   1887
   1888static struct mmc_host_ops mmci_ops = {
   1889	.request	= mmci_request,
   1890	.pre_req	= mmci_pre_request,
   1891	.post_req	= mmci_post_request,
   1892	.set_ios	= mmci_set_ios,
   1893	.get_ro		= mmc_gpio_get_ro,
   1894	.get_cd		= mmci_get_cd,
   1895	.start_signal_voltage_switch = mmci_sig_volt_switch,
   1896};
   1897
   1898static void mmci_probe_level_translator(struct mmc_host *mmc)
   1899{
   1900	struct device *dev = mmc_dev(mmc);
   1901	struct mmci_host *host = mmc_priv(mmc);
   1902	struct gpio_desc *cmd_gpio;
   1903	struct gpio_desc *ck_gpio;
   1904	struct gpio_desc *ckin_gpio;
   1905	int clk_hi, clk_lo;
   1906
   1907	/*
   1908	 * Assume the level translator is present if st,use-ckin is set.
   1909	 * This is to cater for DTs which do not implement this test.
   1910	 */
   1911	host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;
   1912
   1913	cmd_gpio = gpiod_get(dev, "st,cmd", GPIOD_OUT_HIGH);
   1914	if (IS_ERR(cmd_gpio))
   1915		goto exit_cmd;
   1916
   1917	ck_gpio = gpiod_get(dev, "st,ck", GPIOD_OUT_HIGH);
   1918	if (IS_ERR(ck_gpio))
   1919		goto exit_ck;
   1920
   1921	ckin_gpio = gpiod_get(dev, "st,ckin", GPIOD_IN);
   1922	if (IS_ERR(ckin_gpio))
   1923		goto exit_ckin;
   1924
   1925	/* All GPIOs are valid, test whether level translator works */
   1926
   1927	/* Sample CKIN */
   1928	clk_hi = !!gpiod_get_value(ckin_gpio);
   1929
   1930	/* Set CK low */
   1931	gpiod_set_value(ck_gpio, 0);
   1932
   1933	/* Sample CKIN */
   1934	clk_lo = !!gpiod_get_value(ckin_gpio);
   1935
   1936	/* Tristate all */
   1937	gpiod_direction_input(cmd_gpio);
   1938	gpiod_direction_input(ck_gpio);
   1939
   1940	/* Level translator is present if CK signal is propagated to CKIN */
   1941	if (!clk_hi || clk_lo) {
   1942		host->clk_reg_add &= ~MCI_STM32_CLK_SELCKIN;
   1943		dev_warn(dev,
   1944			 "Level translator inoperable, CK signal not detected on CKIN, disabling.\n");
   1945	}
   1946
   1947	gpiod_put(ckin_gpio);
   1948
   1949exit_ckin:
   1950	gpiod_put(ck_gpio);
   1951exit_ck:
   1952	gpiod_put(cmd_gpio);
   1953exit_cmd:
   1954	pinctrl_select_default_state(dev);
   1955}
   1956
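        /*
         * Illustrative DT fragment for the vendor properties parsed below;
         * the property names are the real bindings, but the node, address
         * and selection of properties are only an example:
         *
         *	mmc@80126000 {
         *		compatible = "arm,pl18x", "arm,primecell";
         *		st,sig-dir-dat0;
         *		st,sig-pin-fbclk;
         *		mmc-cap-sd-highspeed;
         *	};
         */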
   1957static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
   1958{
   1959	struct mmci_host *host = mmc_priv(mmc);
   1960	int ret = mmc_of_parse(mmc);
   1961
   1962	if (ret)
   1963		return ret;
   1964
   1965	if (of_get_property(np, "st,sig-dir-dat0", NULL))
   1966		host->pwr_reg_add |= MCI_ST_DATA0DIREN;
   1967	if (of_get_property(np, "st,sig-dir-dat2", NULL))
   1968		host->pwr_reg_add |= MCI_ST_DATA2DIREN;
   1969	if (of_get_property(np, "st,sig-dir-dat31", NULL))
   1970		host->pwr_reg_add |= MCI_ST_DATA31DIREN;
   1971	if (of_get_property(np, "st,sig-dir-dat74", NULL))
   1972		host->pwr_reg_add |= MCI_ST_DATA74DIREN;
   1973	if (of_get_property(np, "st,sig-dir-cmd", NULL))
   1974		host->pwr_reg_add |= MCI_ST_CMDDIREN;
   1975	if (of_get_property(np, "st,sig-pin-fbclk", NULL))
   1976		host->pwr_reg_add |= MCI_ST_FBCLKEN;
   1977	if (of_get_property(np, "st,sig-dir", NULL))
   1978		host->pwr_reg_add |= MCI_STM32_DIRPOL;
   1979	if (of_get_property(np, "st,neg-edge", NULL))
   1980		host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
   1981	if (of_get_property(np, "st,use-ckin", NULL))
   1982		mmci_probe_level_translator(mmc);
   1983
   1984	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
   1985		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
   1986	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
   1987		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
   1988
   1989	return 0;
   1990}
   1991
   1992static int mmci_probe(struct amba_device *dev,
   1993	const struct amba_id *id)
   1994{
   1995	struct mmci_platform_data *plat = dev->dev.platform_data;
   1996	struct device_node *np = dev->dev.of_node;
   1997	struct variant_data *variant = id->data;
   1998	struct mmci_host *host;
   1999	struct mmc_host *mmc;
   2000	int ret;
   2001
   2002	/* Must have platform data or Device Tree. */
   2003	if (!plat && !np) {
   2004		dev_err(&dev->dev, "No plat data or DT found\n");
   2005		return -EINVAL;
   2006	}
   2007
   2008	if (!plat) {
   2009		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
   2010		if (!plat)
   2011			return -ENOMEM;
   2012	}
   2013
   2014	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
   2015	if (!mmc)
   2016		return -ENOMEM;
   2017
   2018	host = mmc_priv(mmc);
   2019	host->mmc = mmc;
   2020	host->mmc_ops = &mmci_ops;
   2021	mmc->ops = &mmci_ops;
   2022
   2023	ret = mmci_of_parse(np, mmc);
   2024	if (ret)
   2025		goto host_free;
   2026
   2027	/*
    2028	 * Some variants (STM32) don't have an opendrain bit; nevertheless
    2029	 * the pins can be set accordingly using pinctrl.
   2030	 */
   2031	if (!variant->opendrain) {
   2032		host->pinctrl = devm_pinctrl_get(&dev->dev);
   2033		if (IS_ERR(host->pinctrl)) {
    2034			dev_err(&dev->dev, "failed to get pinctrl\n");
   2035			ret = PTR_ERR(host->pinctrl);
   2036			goto host_free;
   2037		}
   2038
   2039		host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
   2040							    MMCI_PINCTRL_STATE_OPENDRAIN);
   2041		if (IS_ERR(host->pins_opendrain)) {
   2042			dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
   2043			ret = PTR_ERR(host->pins_opendrain);
   2044			goto host_free;
   2045		}
   2046	}
   2047
   2048	host->hw_designer = amba_manf(dev);
   2049	host->hw_revision = amba_rev(dev);
   2050	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
   2051	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);
   2052
   2053	host->clk = devm_clk_get(&dev->dev, NULL);
   2054	if (IS_ERR(host->clk)) {
   2055		ret = PTR_ERR(host->clk);
   2056		goto host_free;
   2057	}
   2058
   2059	ret = clk_prepare_enable(host->clk);
   2060	if (ret)
   2061		goto host_free;
   2062
   2063	if (variant->qcom_fifo)
   2064		host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
   2065	else
   2066		host->get_rx_fifocnt = mmci_get_rx_fifocnt;
   2067
   2068	host->plat = plat;
   2069	host->variant = variant;
   2070	host->mclk = clk_get_rate(host->clk);
   2071	/*
   2072	 * According to the spec, mclk is max 100 MHz,
    2073	 * so we try to adjust the clock down to this
    2074	 * (if possible).
   2075	 */
   2076	if (host->mclk > variant->f_max) {
   2077		ret = clk_set_rate(host->clk, variant->f_max);
   2078		if (ret < 0)
   2079			goto clk_disable;
   2080		host->mclk = clk_get_rate(host->clk);
   2081		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
   2082			host->mclk);
   2083	}
   2084
   2085	host->phybase = dev->res.start;
   2086	host->base = devm_ioremap_resource(&dev->dev, &dev->res);
   2087	if (IS_ERR(host->base)) {
   2088		ret = PTR_ERR(host->base);
   2089		goto clk_disable;
   2090	}
   2091
   2092	if (variant->init)
   2093		variant->init(host);
   2094
   2095	/*
    2096	 * The ARM and ST versions of the block have slightly different
    2097	 * clock divider equations, which means that the minimum divider
    2098	 * differs too. On Qualcomm-like controllers, get the nearest
    2099	 * supported clock to 100 kHz instead.
   2100	 */
   2101	if (variant->st_clkdiv)
   2102		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
   2103	else if (variant->stm32_clkdiv)
   2104		mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
   2105	else if (variant->explicit_mclk_control)
   2106		mmc->f_min = clk_round_rate(host->clk, 100000);
   2107	else
   2108		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
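        	/*
        	 * Worked example, assuming mclk = 100 MHz: f_min is ~389 kHz
        	 * with the ST divider (mclk / 257), ~48.9 kHz with the STM32
        	 * divider (mclk / 2046) and ~195 kHz with the original ARM
        	 * divider (mclk / 512).
        	 */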
   2109	/*
    2110	 * If no maximum operating frequency is supplied, fall back to the
    2111	 * module parameter, which has a (low) default value in case it
    2112	 * is not specified. Either way, the value must not exceed the
    2113	 * clock rate into the block.
   2114	 */
   2115	if (mmc->f_max)
   2116		mmc->f_max = variant->explicit_mclk_control ?
   2117				min(variant->f_max, mmc->f_max) :
   2118				min(host->mclk, mmc->f_max);
   2119	else
   2120		mmc->f_max = variant->explicit_mclk_control ?
   2121				fmax : min(host->mclk, fmax);
    2122
   2124	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
   2125
   2126	host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
   2127	if (IS_ERR(host->rst)) {
   2128		ret = PTR_ERR(host->rst);
   2129		goto clk_disable;
   2130	}
   2131	ret = reset_control_deassert(host->rst);
   2132	if (ret)
   2133		dev_err(mmc_dev(mmc), "failed to de-assert reset\n");
   2134
   2135	/* Get regulators and the supported OCR mask */
   2136	ret = mmc_regulator_get_supply(mmc);
   2137	if (ret)
   2138		goto clk_disable;
   2139
   2140	if (!mmc->ocr_avail)
   2141		mmc->ocr_avail = plat->ocr_mask;
   2142	else if (plat->ocr_mask)
   2143		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
   2144
   2145	/* We support these capabilities. */
   2146	mmc->caps |= MMC_CAP_CMD23;
   2147
   2148	/*
   2149	 * Enable busy detection.
   2150	 */
   2151	if (variant->busy_detect) {
   2152		mmci_ops.card_busy = mmci_card_busy;
   2153		/*
   2154		 * Not all variants have a flag to enable busy detection
   2155		 * in the DPSM, but if they do, set it here.
   2156		 */
   2157		if (variant->busy_dpsm_flag)
   2158			mmci_write_datactrlreg(host,
   2159					       host->variant->busy_dpsm_flag);
   2160		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
   2161	}
   2162
    2163	/* Variants with a mandatory busy timeout in HW need R1B responses. */
   2164	if (variant->busy_timeout)
   2165		mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
   2166
   2167	/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
   2168	host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
   2169	host->stop_abort.arg = 0;
   2170	host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;
   2171
   2172	/* We support these PM capabilities. */
   2173	mmc->pm_caps |= MMC_PM_KEEP_POWER;
   2174
   2175	/*
    2176	 * We can do scatter/gather I/O (SGIO).
   2177	 */
   2178	mmc->max_segs = NR_SG;
   2179
   2180	/*
   2181	 * Since only a certain number of bits are valid in the data length
   2182	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
   2183	 * single request.
   2184	 */
   2185	mmc->max_req_size = (1 << variant->datalength_bits) - 1;
   2186
   2187	/*
   2188	 * Set the maximum segment size.  Since we aren't doing DMA
   2189	 * (yet) we are only limited by the data length register.
   2190	 */
   2191	mmc->max_seg_size = mmc->max_req_size;
   2192
   2193	/*
   2194	 * Block size can be up to 2048 bytes, but must be a power of two.
   2195	 */
   2196	mmc->max_blk_size = 1 << variant->datactrl_blocksz;
   2197
   2198	/*
   2199	 * Limit the number of blocks transferred so that we don't overflow
   2200	 * the maximum request size.
   2201	 */
   2202	mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;
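        	/*
        	 * Example, for the original ARM variant (datalength_bits = 16,
        	 * datactrl_blocksz = 11): max_req_size = 2^16 - 1 = 65535 bytes,
        	 * max_blk_size = 2^11 = 2048 bytes and max_blk_count =
        	 * 65535 >> 11 = 31 blocks.
        	 */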
   2203
   2204	spin_lock_init(&host->lock);
   2205
   2206	writel(0, host->base + MMCIMASK0);
   2207
   2208	if (variant->mmcimask1)
   2209		writel(0, host->base + MMCIMASK1);
   2210
   2211	writel(0xfff, host->base + MMCICLEAR);
   2212
   2213	/*
    2214	 * If:
    2215	 * - not using DT but using a descriptor table, or
    2216	 * - using a table of descriptors ALONGSIDE DT,
    2217	 * look up the descriptors named "cd" and "wp" right here, failing
    2218	 * silently if they do not exist.
   2219	 */
   2220	if (!np) {
   2221		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
   2222		if (ret == -EPROBE_DEFER)
   2223			goto clk_disable;
   2224
   2225		ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
   2226		if (ret == -EPROBE_DEFER)
   2227			goto clk_disable;
   2228	}
   2229
   2230	ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
   2231					mmci_irq_thread, IRQF_SHARED,
   2232					DRIVER_NAME " (cmd)", host);
   2233	if (ret)
   2234		goto clk_disable;
   2235
   2236	if (!dev->irq[1])
   2237		host->singleirq = true;
   2238	else {
   2239		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
   2240				IRQF_SHARED, DRIVER_NAME " (pio)", host);
   2241		if (ret)
   2242			goto clk_disable;
   2243	}
   2244
   2245	writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);
   2246
   2247	amba_set_drvdata(dev, mmc);
   2248
   2249	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
   2250		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
   2251		 amba_rev(dev), (unsigned long long)dev->res.start,
   2252		 dev->irq[0], dev->irq[1]);
   2253
   2254	mmci_dma_setup(host);
   2255
   2256	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
   2257	pm_runtime_use_autosuspend(&dev->dev);
   2258
   2259	mmc_add_host(mmc);
   2260
   2261	pm_runtime_put(&dev->dev);
   2262	return 0;
   2263
   2264 clk_disable:
   2265	clk_disable_unprepare(host->clk);
   2266 host_free:
   2267	mmc_free_host(mmc);
   2268	return ret;
   2269}
   2270
   2271static void mmci_remove(struct amba_device *dev)
   2272{
   2273	struct mmc_host *mmc = amba_get_drvdata(dev);
   2274
   2275	if (mmc) {
   2276		struct mmci_host *host = mmc_priv(mmc);
   2277		struct variant_data *variant = host->variant;
   2278
   2279		/*
   2280		 * Undo pm_runtime_put() in probe.  We use the _sync
   2281		 * version here so that we can access the primecell.
   2282		 */
   2283		pm_runtime_get_sync(&dev->dev);
   2284
   2285		mmc_remove_host(mmc);
   2286
   2287		writel(0, host->base + MMCIMASK0);
   2288
   2289		if (variant->mmcimask1)
   2290			writel(0, host->base + MMCIMASK1);
   2291
   2292		writel(0, host->base + MMCICOMMAND);
   2293		writel(0, host->base + MMCIDATACTRL);
   2294
   2295		mmci_dma_release(host);
   2296		clk_disable_unprepare(host->clk);
   2297		mmc_free_host(mmc);
   2298	}
   2299}
   2300
   2301#ifdef CONFIG_PM
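        /*
         * mmci_save() masks the interrupts and, on variants where the power
         * may be cut (pwrreg_nopower), also clears the control registers;
         * mmci_restore() writes the cached values back on resume.
         */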
   2302static void mmci_save(struct mmci_host *host)
   2303{
   2304	unsigned long flags;
   2305
   2306	spin_lock_irqsave(&host->lock, flags);
   2307
   2308	writel(0, host->base + MMCIMASK0);
   2309	if (host->variant->pwrreg_nopower) {
   2310		writel(0, host->base + MMCIDATACTRL);
   2311		writel(0, host->base + MMCIPOWER);
   2312		writel(0, host->base + MMCICLOCK);
   2313	}
   2314	mmci_reg_delay(host);
   2315
   2316	spin_unlock_irqrestore(&host->lock, flags);
   2317}
   2318
   2319static void mmci_restore(struct mmci_host *host)
   2320{
   2321	unsigned long flags;
   2322
   2323	spin_lock_irqsave(&host->lock, flags);
   2324
   2325	if (host->variant->pwrreg_nopower) {
   2326		writel(host->clk_reg, host->base + MMCICLOCK);
   2327		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
   2328		writel(host->pwr_reg, host->base + MMCIPOWER);
   2329	}
   2330	writel(MCI_IRQENABLE | host->variant->start_err,
   2331	       host->base + MMCIMASK0);
   2332	mmci_reg_delay(host);
   2333
   2334	spin_unlock_irqrestore(&host->lock, flags);
   2335}
   2336
   2337static int mmci_runtime_suspend(struct device *dev)
   2338{
   2339	struct amba_device *adev = to_amba_device(dev);
   2340	struct mmc_host *mmc = amba_get_drvdata(adev);
   2341
   2342	if (mmc) {
   2343		struct mmci_host *host = mmc_priv(mmc);
   2344		pinctrl_pm_select_sleep_state(dev);
   2345		mmci_save(host);
   2346		clk_disable_unprepare(host->clk);
   2347	}
   2348
   2349	return 0;
   2350}
   2351
   2352static int mmci_runtime_resume(struct device *dev)
   2353{
   2354	struct amba_device *adev = to_amba_device(dev);
   2355	struct mmc_host *mmc = amba_get_drvdata(adev);
   2356
   2357	if (mmc) {
   2358		struct mmci_host *host = mmc_priv(mmc);
   2359		clk_prepare_enable(host->clk);
   2360		mmci_restore(host);
   2361		pinctrl_select_default_state(dev);
   2362	}
   2363
   2364	return 0;
   2365}
   2366#endif
   2367
   2368static const struct dev_pm_ops mmci_dev_pm_ops = {
   2369	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
   2370				pm_runtime_force_resume)
   2371	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
   2372};
   2373
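        /*
         * AMBA matching: a device matches an entry when
         * (periphid & mask) == id, so e.g. mask 0xff0fffff ignores the
         * revision nibble and 0x00041180 matches any revision of the
         * original ARM PL180.
         */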
   2374static const struct amba_id mmci_ids[] = {
   2375	{
   2376		.id	= 0x00041180,
   2377		.mask	= 0xff0fffff,
   2378		.data	= &variant_arm,
   2379	},
   2380	{
   2381		.id	= 0x01041180,
   2382		.mask	= 0xff0fffff,
   2383		.data	= &variant_arm_extended_fifo,
   2384	},
   2385	{
   2386		.id	= 0x02041180,
   2387		.mask	= 0xff0fffff,
   2388		.data	= &variant_arm_extended_fifo_hwfc,
   2389	},
   2390	{
   2391		.id	= 0x00041181,
   2392		.mask	= 0x000fffff,
   2393		.data	= &variant_arm,
   2394	},
   2395	/* ST Micro variants */
   2396	{
   2397		.id     = 0x00180180,
   2398		.mask   = 0x00ffffff,
   2399		.data	= &variant_u300,
   2400	},
   2401	{
   2402		.id     = 0x10180180,
   2403		.mask   = 0xf0ffffff,
   2404		.data	= &variant_nomadik,
   2405	},
   2406	{
   2407		.id     = 0x00280180,
   2408		.mask   = 0x00ffffff,
   2409		.data	= &variant_nomadik,
   2410	},
   2411	{
   2412		.id     = 0x00480180,
   2413		.mask   = 0xf0ffffff,
   2414		.data	= &variant_ux500,
   2415	},
   2416	{
   2417		.id     = 0x10480180,
   2418		.mask   = 0xf0ffffff,
   2419		.data	= &variant_ux500v2,
   2420	},
   2421	{
   2422		.id     = 0x00880180,
   2423		.mask   = 0x00ffffff,
   2424		.data	= &variant_stm32,
   2425	},
   2426	{
   2427		.id     = 0x10153180,
   2428		.mask	= 0xf0ffffff,
   2429		.data	= &variant_stm32_sdmmc,
   2430	},
   2431	{
   2432		.id     = 0x00253180,
   2433		.mask	= 0xf0ffffff,
   2434		.data	= &variant_stm32_sdmmcv2,
   2435	},
   2436	{
   2437		.id     = 0x20253180,
   2438		.mask	= 0xf0ffffff,
   2439		.data	= &variant_stm32_sdmmcv2,
   2440	},
   2441	/* Qualcomm variants */
   2442	{
   2443		.id     = 0x00051180,
   2444		.mask	= 0x000fffff,
   2445		.data	= &variant_qcom,
   2446	},
   2447	{ 0, 0 },
   2448};
   2449
   2450MODULE_DEVICE_TABLE(amba, mmci_ids);
   2451
   2452static struct amba_driver mmci_driver = {
   2453	.drv		= {
   2454		.name	= DRIVER_NAME,
   2455		.pm	= &mmci_dev_pm_ops,
   2456	},
   2457	.probe		= mmci_probe,
   2458	.remove		= mmci_remove,
   2459	.id_table	= mmci_ids,
   2460};
   2461
   2462module_amba_driver(mmci_driver);
   2463
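        /*
         * fmax provides the fallback maximum card clock used in mmci_probe()
         * when neither DT nor platform data supply one; it is read-only at
         * runtime (0444) but can be set at module load time.
         */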
   2464module_param(fmax, uint, 0444);
   2465
   2466MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
   2467MODULE_LICENSE("GPL");