cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sdhci-of-esdhc.c (41556B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Freescale eSDHC controller driver.
      4 *
      5 * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
      6 * Copyright (c) 2009 MontaVista Software, Inc.
      7 * Copyright 2020 NXP
      8 *
      9 * Authors: Xiaobo Xie <X.Xie@freescale.com>
     10 *	    Anton Vorontsov <avorontsov@ru.mvista.com>
     11 */
     12
     13#include <linux/err.h>
     14#include <linux/io.h>
     15#include <linux/of.h>
     16#include <linux/of_address.h>
     17#include <linux/delay.h>
     18#include <linux/module.h>
     19#include <linux/sys_soc.h>
     20#include <linux/clk.h>
     21#include <linux/ktime.h>
     22#include <linux/dma-mapping.h>
     23#include <linux/iopoll.h>
     24#include <linux/mmc/host.h>
     25#include <linux/mmc/mmc.h>
     26#include "sdhci-pltfm.h"
     27#include "sdhci-esdhc.h"
     28
     29#define VENDOR_V_22	0x12
     30#define VENDOR_V_23	0x13
     31
     32#define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)
     33
     34struct esdhc_clk_fixup {
     35	const unsigned int sd_dflt_max_clk;
     36	const unsigned int max_clk[MMC_TIMING_NUM];
     37};
     38
     39static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
     40	.sd_dflt_max_clk = 25000000,
     41	.max_clk[MMC_TIMING_MMC_HS] = 46500000,
     42	.max_clk[MMC_TIMING_SD_HS] = 46500000,
     43};
     44
     45static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
     46	.sd_dflt_max_clk = 25000000,
     47	.max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
     48	.max_clk[MMC_TIMING_MMC_HS200] = 167000000,
     49};
     50
     51static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
     52	.sd_dflt_max_clk = 25000000,
     53	.max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
     54	.max_clk[MMC_TIMING_MMC_HS200] = 125000000,
     55};
     56
     57static const struct esdhc_clk_fixup p1010_esdhc_clk = {
     58	.sd_dflt_max_clk = 20000000,
     59	.max_clk[MMC_TIMING_LEGACY] = 20000000,
     60	.max_clk[MMC_TIMING_MMC_HS] = 42000000,
     61	.max_clk[MMC_TIMING_SD_HS] = 40000000,
     62};
     63
     64static const struct of_device_id sdhci_esdhc_of_match[] = {
     65	{ .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
     66	{ .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
     67	{ .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
     68	{ .compatible = "fsl,p1010-esdhc",   .data = &p1010_esdhc_clk},
     69	{ .compatible = "fsl,mpc8379-esdhc" },
     70	{ .compatible = "fsl,mpc8536-esdhc" },
     71	{ .compatible = "fsl,esdhc" },
     72	{ }
     73};
     74MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
     75
     76struct sdhci_esdhc {
     77	u8 vendor_ver;
     78	u8 spec_ver;
     79	bool quirk_incorrect_hostver;
     80	bool quirk_limited_clk_division;
     81	bool quirk_unreliable_pulse_detection;
     82	bool quirk_tuning_erratum_type1;
     83	bool quirk_tuning_erratum_type2;
     84	bool quirk_ignore_data_inhibit;
     85	bool quirk_delay_before_data_reset;
     86	bool quirk_trans_complete_erratum;
     87	bool in_sw_tuning;
     88	unsigned int peripheral_clock;
     89	const struct esdhc_clk_fixup *clk_fixup;
     90	u32 div_ratio;
     91};
     92
     93/**
     94 * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
     95 *		       to make it compatible with SD spec.
     96 *
     97 * @host: pointer to sdhci_host
     98 * @spec_reg: SD spec register address
     99 * @value: 32bit eSDHC register value on spec_reg address
    100 *
    101 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
    102 * registers are 32 bits. There are differences in register size, register
    103 * address, register function, bit position and function between eSDHC spec
    104 * and SD spec.
    105 *
    106 * Return a fixed up register value
    107 */
    108static u32 esdhc_readl_fixup(struct sdhci_host *host,
    109				     int spec_reg, u32 value)
    110{
    111	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
    112	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
    113	u32 ret;
    114
    115	/*
    116	 * The ADMA flag bit in eSDHC is not compatible with the standard
    117	 * SDHC register, so set the fake flag SDHCI_CAN_DO_ADMA2 when ADMA
    118	 * is supported by eSDHC.
    119	 * On many FSL eSDHC controllers the reset value of the
    120	 * SDHCI_CAN_DO_ADMA1 field is 1, but some of them cannot support
    121	 * ADMA; only those whose vendor version is greater than 2.2/0x12 do.
    122	 */
    123	if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
    124		if (esdhc->vendor_ver > VENDOR_V_22) {
    125			ret = value | SDHCI_CAN_DO_ADMA2;
    126			return ret;
    127		}
    128	}
    129	/*
    130	 * The DAT[3:0] line signal levels and the CMD line signal level are
    131	 * not compatible with standard SDHC register. The line signal levels
    132	 * DAT[7:0] are at bits 31:24 and the command line signal level is at
    133	 * bit 23. All other bits are the same as in the standard SDHC
    134	 * register.
    135	 */
    136	if (spec_reg == SDHCI_PRESENT_STATE) {
    137		ret = value & 0x000fffff;
    138		ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
    139		ret |= (value << 1) & SDHCI_CMD_LVL;
    140		return ret;
    141	}
    142
    143	/*
    144	 * DTS properties of mmc host are used to enable each speed mode
    145	 * according to soc and board capability. So clean up
    146	 * SDR50/SDR104/DDR50 support bits here.
    147	 */
    148	if (spec_reg == SDHCI_CAPABILITIES_1) {
    149		ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
    150				SDHCI_SUPPORT_DDR50);
    151		return ret;
    152	}
    153
    154	/*
    155	 * Some controllers have unreliable Data Line Active
    156	 * bit for commands with busy signal. This affects
    157	 * the Command Inhibit (data) bit. Just ignore it since the
    158	 * MMC core driver already polls the card status with CMD13
    159	 * after any command with busy signal.
    160	 */
    161	if ((spec_reg == SDHCI_PRESENT_STATE) &&
    162	(esdhc->quirk_ignore_data_inhibit == true)) {
    163		ret = value & ~SDHCI_DATA_INHIBIT;
    164		return ret;
    165	}
    166
    167	ret = value;
    168	return ret;
    169}
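
/*
 * Editor's note (illustrative, not part of the driver): a worked example
 * of the SDHCI_PRESENT_STATE remapping above, assuming the usual SDHCI
 * layout where SDHCI_DATA_LVL_MASK covers bits 23:20 and SDHCI_CMD_LVL is
 * bit 24. Take a hypothetical eSDHC PRSSTAT value of 0x0f800000, i.e.
 * DAT[3:0] high at bits 27:24 and the CMD line high at bit 23:
 *
 *   value & 0x000fffff                  = 0x00000000
 *   (value >> 4) & SDHCI_DATA_LVL_MASK  = 0x00f00000  (DAT[3:0] -> 23:20)
 *   (value << 1) & SDHCI_CMD_LVL        = 0x01000000  (CMD      -> bit 24)
 *
 * so esdhc_readl_fixup() returns 0x01f00000, which the SDHCI core decodes
 * as all four data lines and the command line driven high.
 */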
    170
    171static u16 esdhc_readw_fixup(struct sdhci_host *host,
    172				     int spec_reg, u32 value)
    173{
    174	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
    175	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
    176	u16 ret;
    177	int shift = (spec_reg & 0x2) * 8;
    178
    179	if (spec_reg == SDHCI_TRANSFER_MODE)
    180		return pltfm_host->xfer_mode_shadow;
    181
    182	if (spec_reg == SDHCI_HOST_VERSION)
    183		ret = value & 0xffff;
    184	else
    185		ret = (value >> shift) & 0xffff;
    186	/* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
    187	 * vendor version and spec version information.
    188	 */
    189	if ((spec_reg == SDHCI_HOST_VERSION) &&
    190	    (esdhc->quirk_incorrect_hostver))
    191		ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
    192	return ret;
    193}
    194
    195static u8 esdhc_readb_fixup(struct sdhci_host *host,
    196				     int spec_reg, u32 value)
    197{
    198	u8 ret;
    199	u8 dma_bits;
    200	int shift = (spec_reg & 0x3) * 8;
    201
    202	ret = (value >> shift) & 0xff;
    203
    204	/*
    205	 * "DMA select" is located at offset 0x28 in the SD specification, but
    206	 * on P5020 or P3041 it is located at 0x29.
    207	 */
    208	if (spec_reg == SDHCI_HOST_CONTROL) {
    209		/* DMA select is 22,23 bits in Protocol Control Register */
    210		dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
    211		/* fixup the result */
    212		ret &= ~SDHCI_CTRL_DMA_MASK;
    213		ret |= dma_bits;
    214	}
    215	return ret;
    216}
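
/*
 * Editor's note (illustrative, not part of the driver): the ">> 5" above
 * moves the DMA-select field from bits 9:8 of the 32-bit Protocol Control
 * word (the "bits 22,23" in the comment presumably follow the reference
 * manual's MSB-first bit numbering) into bits 4:3 of the SDHCI host
 * control byte, assuming SDHCI_CTRL_DMA_MASK is 0x18. For a hypothetical
 * register value with only bit 9 set:
 *
 *   (0x00000200 >> 5) & 0x18 = 0x10
 *
 * which the SDHCI core reads back from SDHCI_HOST_CONTROL as the ADMA2
 * selection (SDHCI_CTRL_ADMA32).
 */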
    217
    218/**
    219 * esdhc_write*_fixup - Fixup the SD spec register value so that it could be
    220 *			written into eSDHC register.
    221 *
    222 * @host: pointer to sdhci_host
    223 * @spec_reg: SD spec register address
    224 * @value: 8/16/32bit SD spec register value that would be written
    225 * @old_value: 32bit eSDHC register value on spec_reg address
    226 *
    227 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
    228 * registers are 32 bits. There are differences in register size, register
    229 * address, register function, bit position and function between eSDHC spec
    230 * and SD spec.
    231 *
    232 * Return a fixed up register value
    233 */
    234static u32 esdhc_writel_fixup(struct sdhci_host *host,
    235				     int spec_reg, u32 value, u32 old_value)
    236{
    237	u32 ret;
    238
    239	/*
    240	 * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
    241	 * when SYSCTL[RSTD] is set for some special operations.
    242	 * It has no impact on other operations.
    243	 */
    244	if (spec_reg == SDHCI_INT_ENABLE)
    245		ret = value | SDHCI_INT_BLK_GAP;
    246	else
    247		ret = value;
    248
    249	return ret;
    250}
    251
    252static u32 esdhc_writew_fixup(struct sdhci_host *host,
    253				     int spec_reg, u16 value, u32 old_value)
    254{
    255	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
    256	int shift = (spec_reg & 0x2) * 8;
    257	u32 ret;
    258
    259	switch (spec_reg) {
    260	case SDHCI_TRANSFER_MODE:
    261		/*
    262		 * Postpone this write, we must do it together with a
    263		 * command write that is down below. Return old value.
    264		 */
    265		pltfm_host->xfer_mode_shadow = value;
    266		return old_value;
    267	case SDHCI_COMMAND:
    268		ret = (value << 16) | pltfm_host->xfer_mode_shadow;
    269		return ret;
    270	}
    271
    272	ret = old_value & (~(0xffff << shift));
    273	ret |= (value << shift);
    274
    275	if (spec_reg == SDHCI_BLOCK_SIZE) {
    276		/*
    277		 * The last two DMA boundary bits are reserved, and the first one
    278		 * is used for a non-standard blksz of 4096 bytes that we don't
    279		 * support yet. So clear the DMA boundary bits.
    280		 */
    281		ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
    282	}
    283	return ret;
    284}
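
/*
 * Editor's note (illustrative, not part of the driver): a minimal sketch
 * of the transfer-mode shadowing above, with hypothetical register values.
 * A 16-bit SDHCI_TRANSFER_MODE write is only latched into
 * pltfm_host->xfer_mode_shadow (the wrappers below skip the hardware write
 * for this register); it reaches the controller when the SDHCI core later
 * writes SDHCI_COMMAND and both halves go out as one 32-bit access:
 *
 *   esdhc_writew_fixup(host, SDHCI_TRANSFER_MODE, 0x0022, old); -> shadowed
 *   esdhc_writew_fixup(host, SDHCI_COMMAND, 0x113a, old);
 *                                 -> (0x113a << 16) | 0x0022 = 0x113a0022
 *
 * matching the eSDHC expectation that command and transfer mode are
 * written together as a single 32-bit register.
 */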
    285
    286static u32 esdhc_writeb_fixup(struct sdhci_host *host,
    287				     int spec_reg, u8 value, u32 old_value)
    288{
    289	u32 ret;
    290	u32 dma_bits;
    291	u8 tmp;
    292	int shift = (spec_reg & 0x3) * 8;
    293
    294	/*
    295	 * eSDHC doesn't have a standard power control register, so we do
    296	 * nothing here to avoid incorrect operation.
    297	 */
    298	if (spec_reg == SDHCI_POWER_CONTROL)
    299		return old_value;
    300	/*
    301	 * "DMA select" is located at offset 0x28 in the SD specification, but
    302	 * on P5020 or P3041 it's located at 0x29.
    303	 */
    304	if (spec_reg == SDHCI_HOST_CONTROL) {
    305		/*
    306		 * If host control register is not standard, exit
    307		 * this function
    308		 */
    309		if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
    310			return old_value;
    311
    312		/* DMA select is 22,23 bits in Protocol Control Register */
    313		dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
    314		ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
    315		tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
    316		      (old_value & SDHCI_CTRL_DMA_MASK);
    317		ret = (ret & (~0xff)) | tmp;
    318
    319		/* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
    320		ret &= ~ESDHC_HOST_CONTROL_RES;
    321		return ret;
    322	}
    323
    324	ret = (old_value & (~(0xff << shift))) | (value << shift);
    325	return ret;
    326}
    327
    328static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
    329{
    330	u32 ret;
    331	u32 value;
    332
    333	if (reg == SDHCI_CAPABILITIES_1)
    334		value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
    335	else
    336		value = ioread32be(host->ioaddr + reg);
    337
    338	ret = esdhc_readl_fixup(host, reg, value);
    339
    340	return ret;
    341}
    342
    343static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
    344{
    345	u32 ret;
    346	u32 value;
    347
    348	if (reg == SDHCI_CAPABILITIES_1)
    349		value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
    350	else
    351		value = ioread32(host->ioaddr + reg);
    352
    353	ret = esdhc_readl_fixup(host, reg, value);
    354
    355	return ret;
    356}
    357
    358static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
    359{
    360	u16 ret;
    361	u32 value;
    362	int base = reg & ~0x3;
    363
    364	value = ioread32be(host->ioaddr + base);
    365	ret = esdhc_readw_fixup(host, reg, value);
    366	return ret;
    367}
    368
    369static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
    370{
    371	u16 ret;
    372	u32 value;
    373	int base = reg & ~0x3;
    374
    375	value = ioread32(host->ioaddr + base);
    376	ret = esdhc_readw_fixup(host, reg, value);
    377	return ret;
    378}
    379
    380static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
    381{
    382	u8 ret;
    383	u32 value;
    384	int base = reg & ~0x3;
    385
    386	value = ioread32be(host->ioaddr + base);
    387	ret = esdhc_readb_fixup(host, reg, value);
    388	return ret;
    389}
    390
    391static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
    392{
    393	u8 ret;
    394	u32 value;
    395	int base = reg & ~0x3;
    396
    397	value = ioread32(host->ioaddr + base);
    398	ret = esdhc_readb_fixup(host, reg, value);
    399	return ret;
    400}
    401
    402static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
    403{
    404	u32 value;
    405
    406	value = esdhc_writel_fixup(host, reg, val, 0);
    407	iowrite32be(value, host->ioaddr + reg);
    408}
    409
    410static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
    411{
    412	u32 value;
    413
    414	value = esdhc_writel_fixup(host, reg, val, 0);
    415	iowrite32(value, host->ioaddr + reg);
    416}
    417
    418static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
    419{
    420	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
    421	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
    422	int base = reg & ~0x3;
    423	u32 value;
    424	u32 ret;
    425
    426	value = ioread32be(host->ioaddr + base);
    427	ret = esdhc_writew_fixup(host, reg, val, value);
    428	if (reg != SDHCI_TRANSFER_MODE)
    429		iowrite32be(ret, host->ioaddr + base);
    430
    431	/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
    432	 * 1us after ESDHC_EXTN is set.
    433	 */
    434	if (base == ESDHC_SYSTEM_CONTROL_2) {
    435		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
    436		    esdhc->in_sw_tuning) {
    437			udelay(1);
    438			ret |= ESDHC_SMPCLKSEL;
    439			iowrite32be(ret, host->ioaddr + base);
    440		}
    441	}
    442}
    443
    444static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
    445{
    446	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
    447	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
    448	int base = reg & ~0x3;
    449	u32 value;
    450	u32 ret;
    451
    452	value = ioread32(host->ioaddr + base);
    453	ret = esdhc_writew_fixup(host, reg, val, value);
    454	if (reg != SDHCI_TRANSFER_MODE)
    455		iowrite32(ret, host->ioaddr + base);
    456
    457	/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
    458	 * 1us after ESDHC_EXTN is set.
    459	 */
    460	if (base == ESDHC_SYSTEM_CONTROL_2) {
    461		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
    462		    esdhc->in_sw_tuning) {
    463			udelay(1);
    464			ret |= ESDHC_SMPCLKSEL;
    465			iowrite32(ret, host->ioaddr + base);
    466		}
    467	}
    468}
    469
    470static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
    471{
    472	int base = reg & ~0x3;
    473	u32 value;
    474	u32 ret;
    475
    476	value = ioread32be(host->ioaddr + base);
    477	ret = esdhc_writeb_fixup(host, reg, val, value);
    478	iowrite32be(ret, host->ioaddr + base);
    479}
    480
    481static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
    482{
    483	int base = reg & ~0x3;
    484	u32 value;
    485	u32 ret;
    486
    487	value = ioread32(host->ioaddr + base);
    488	ret = esdhc_writeb_fixup(host, reg, val, value);
    489	iowrite32(ret, host->ioaddr + base);
    490}
    491
    492/*
    493 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
    494 * error(IRQSTAT[ADMAE]) if both Transfer Complete(IRQSTAT[TC])
    495 * and Block Gap Event(IRQSTAT[BGE]) are also set.
    496 * For Continue, apply soft reset for data(SYSCTL[RSTD]);
    497 * and re-issue the entire read transaction from beginning.
    498 */
    499static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
    500{
    501	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
    502	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
    503	bool applicable;
    504	dma_addr_t dmastart;
    505	dma_addr_t dmanow;
    506
    507	applicable = (intmask & SDHCI_INT_DATA_END) &&
    508		     (intmask & SDHCI_INT_BLK_GAP) &&
    509		     (esdhc->vendor_ver == VENDOR_V_23);
    510	if (!applicable)
    511		return;
    512
    513	host->data->error = 0;
    514	dmastart = sg_dma_address(host->data->sg);
    515	dmanow = dmastart + host->data->bytes_xfered;
    516	/*
    517	 * Force update to the next DMA block boundary.
    518	 */
    519	dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
    520		SDHCI_DEFAULT_BOUNDARY_SIZE;
    521	host->data->bytes_xfered = dmanow - dmastart;
    522	sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
    523}
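
/*
 * Editor's note (illustrative, not part of the driver): a worked example
 * of the boundary rounding above, assuming SDHCI_DEFAULT_BOUNDARY_SIZE is
 * 512 KiB (0x80000) and a hypothetical transfer with
 * dmastart = 0x80100000 and bytes_xfered = 0x3000:
 *
 *   dmanow = 0x80103000
 *   dmanow = (0x80103000 & ~0x7ffff) + 0x80000 = 0x80180000
 *
 * so bytes_xfered is advanced to 0x80000 and the DMA engine is restarted
 * at the next 512 KiB boundary.
 */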
    524
    525static int esdhc_of_enable_dma(struct sdhci_host *host)
    526{
    527	int ret;
    528	u32 value;
    529	struct device *dev = mmc_dev(host->mmc);
    530
    531	if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
    532	    of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
    533		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
    534		if (ret)
    535			return ret;
    536	}
    537
    538	value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
    539
    540	if (of_dma_is_coherent(dev->of_node))
    541		value |= ESDHC_DMA_SNOOP;
    542	else
    543		value &= ~ESDHC_DMA_SNOOP;
    544
    545	sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
    546	return 0;
    547}
    548
    549static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
    550{
    551	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
    552	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
    553
    554	if (esdhc->peripheral_clock)
    555		return esdhc->peripheral_clock;
    556	else
    557		return pltfm_host->clock;
    558}
    559
    560static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
    561{
    562	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
    563	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
    564	unsigned int clock;
    565
    566	if (esdhc->peripheral_clock)
    567		clock = esdhc->peripheral_clock;
    568	else
    569		clock = pltfm_host->clock;
    570	return clock / 256 / 16;
    571}
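
/*
 * Editor's note (illustrative, not part of the driver): the constant above
 * is simply the largest division esdhc_of_set_clock() can program, i.e. a
 * maximum prescaler of 256 times a maximum divisor of 16. With a
 * hypothetical base clock of 200 MHz:
 *
 *   200000000 / 256 / 16 = 48828 Hz (~48.8 kHz minimum SD clock)
 */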
    572
    573static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
    574{
    575	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
    576	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
    577	ktime_t timeout;
    578	u32 val, clk_en;
    579
    580	clk_en = ESDHC_CLOCK_SDCLKEN;
    581
    582	/*
    583	 * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
    584	 * is 2.2 or lower.
    585	 */
    586	if (esdhc->vendor_ver <= VENDOR_V_22)
    587		clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
    588			   ESDHC_CLOCK_PEREN);
    589
    590	val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
    591
    592	if (enable)
    593		val |= clk_en;
    594	else
    595		val &= ~clk_en;
    596
    597	sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
    598
    599	/*
    600	 * Wait max 20 ms. If vendor version is 2.2 or lower, do not
    601	 * wait for the clock stable bit, which does not exist.
    602	 */
    603	timeout = ktime_add_ms(ktime_get(), 20);
    604	while (esdhc->vendor_ver > VENDOR_V_22) {
    605		bool timedout = ktime_after(ktime_get(), timeout);
    606
    607		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
    608			break;
    609		if (timedout) {
    610			pr_err("%s: Internal clock never stabilised.\n",
    611				mmc_hostname(host->mmc));
    612			break;
    613		}
    614		usleep_range(10, 20);
    615	}
    616}
    617
    618static void esdhc_flush_async_fifo(struct sdhci_host *host)
    619{
    620	ktime_t timeout;
    621	u32 val;
    622
    623	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
    624	val |= ESDHC_FLUSH_ASYNC_FIFO;
    625	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
    626
    627	/* Wait max 20 ms */
    628	timeout = ktime_add_ms(ktime_get(), 20);
    629	while (1) {
    630		bool timedout = ktime_after(ktime_get(), timeout);
    631
    632		if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
    633		      ESDHC_FLUSH_ASYNC_FIFO))
    634			break;
    635		if (timedout) {
    636			pr_err("%s: flushing asynchronous FIFO timeout.\n",
    637				mmc_hostname(host->mmc));
    638			break;
    639		}
    640		usleep_range(10, 20);
    641	}
    642}
    643
    644static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
    645{
    646	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
    647	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
    648	unsigned int pre_div = 1, div = 1;
    649	unsigned int clock_fixup = 0;
    650	ktime_t timeout;
    651	u32 temp;
    652
    653	if (clock == 0) {
    654		host->mmc->actual_clock = 0;
    655		esdhc_clock_enable(host, false);
    656		return;
    657	}
    658
    659	/* Start pre_div at 2 for vendor version < 2.3. */
    660	if (esdhc->vendor_ver < VENDOR_V_23)
    661		pre_div = 2;
    662
    663	/* Fix clock value. */
    664	if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
    665	    esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
    666		clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
    667	else if (esdhc->clk_fixup)
    668		clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
    669
    670	if (clock_fixup == 0 || clock < clock_fixup)
    671		clock_fixup = clock;
    672
    673	/* Calculate pre_div and div. */
    674	while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
    675		pre_div *= 2;
    676
    677	while (host->max_clk / pre_div / div > clock_fixup && div < 16)
    678		div++;
    679
    680	esdhc->div_ratio = pre_div * div;
    681
    682	/* Limit clock division for HS400 200MHz clock for quirk. */
    683	if (esdhc->quirk_limited_clk_division &&
    684	    clock == MMC_HS200_MAX_DTR &&
    685	    (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
    686	     host->flags & SDHCI_HS400_TUNING)) {
    687		if (esdhc->div_ratio <= 4) {
    688			pre_div = 4;
    689			div = 1;
    690		} else if (esdhc->div_ratio <= 8) {
    691			pre_div = 4;
    692			div = 2;
    693		} else if (esdhc->div_ratio <= 12) {
    694			pre_div = 4;
    695			div = 3;
    696		} else {
    697			pr_warn("%s: using unsupported clock division.\n",
    698				mmc_hostname(host->mmc));
    699		}
    700		esdhc->div_ratio = pre_div * div;
    701	}
    702
    703	host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;
    704
    705	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
    706		clock, host->mmc->actual_clock);
    707
    708	/* Set clock division into register. */
    709	pre_div >>= 1;
    710	div--;
    711
    712	esdhc_clock_enable(host, false);
    713
    714	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
    715	temp &= ~ESDHC_CLOCK_MASK;
    716	temp |= ((div << ESDHC_DIVIDER_SHIFT) |
    717		(pre_div << ESDHC_PREDIV_SHIFT));
    718	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
    719
    720	/*
    721	 * Wait max 20 ms. If vendor version is 2.2 or lower, do not
    722	 * wait for the clock stable bit, which does not exist.
    723	 */
    724	timeout = ktime_add_ms(ktime_get(), 20);
    725	while (esdhc->vendor_ver > VENDOR_V_22) {
    726		bool timedout = ktime_after(ktime_get(), timeout);
    727
    728		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
    729			break;
    730		if (timedout) {
    731			pr_err("%s: Internal clock never stabilised.\n",
    732				mmc_hostname(host->mmc));
    733			break;
    734		}
    735		usleep_range(10, 20);
    736	}
    737
    738	/* Additional setting for HS400. */
    739	if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
    740	    clock == MMC_HS200_MAX_DTR) {
    741		temp = sdhci_readl(host, ESDHC_TBCTL);
    742		sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
    743		temp = sdhci_readl(host, ESDHC_SDCLKCTL);
    744		sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
    745		esdhc_clock_enable(host, true);
    746
    747		temp = sdhci_readl(host, ESDHC_DLLCFG0);
    748		temp |= ESDHC_DLL_ENABLE;
    749		if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
    750			temp |= ESDHC_DLL_FREQ_SEL;
    751		sdhci_writel(host, temp, ESDHC_DLLCFG0);
    752
    753		temp |= ESDHC_DLL_RESET;
    754		sdhci_writel(host, temp, ESDHC_DLLCFG0);
    755		udelay(1);
    756		temp &= ~ESDHC_DLL_RESET;
    757		sdhci_writel(host, temp, ESDHC_DLLCFG0);
    758
    759		/* Wait max 20 ms */
    760		if (read_poll_timeout(sdhci_readl, temp,
    761				      temp & ESDHC_DLL_STS_SLV_LOCK,
    762				      10, 20000, false,
    763				      host, ESDHC_DLLSTAT0))
    764			pr_err("%s: timeout for delay chain lock.\n",
    765			       mmc_hostname(host->mmc));
    766
    767		temp = sdhci_readl(host, ESDHC_TBCTL);
    768		sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
    769
    770		esdhc_clock_enable(host, false);
    771		esdhc_flush_async_fifo(host);
    772	}
    773	esdhc_clock_enable(host, true);
    774}
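
/*
 * Editor's note (illustrative, not part of the driver): a worked example
 * of the divisor selection in esdhc_of_set_clock() above, assuming a
 * hypothetical host->max_clk of 200 MHz, a requested clock of 50 MHz and
 * vendor version >= 2.3 (so pre_div starts at 1):
 *
 *   prescaler loop: 200 MHz / 1 / 16 = 12.5 MHz, not above 50 MHz,
 *                   so pre_div stays 1
 *   divisor loop:   200/1, 200/2 and 200/3 MHz are all above 50 MHz;
 *                   200/4 = 50 MHz -> div = 4
 *
 * giving div_ratio = 4 and actual_clock = 50 MHz; the register fields are
 * then written as pre_div >> 1 = 0 and div - 1 = 3.
 */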
    775
    776static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
    777{
    778	u32 ctrl;
    779
    780	ctrl = sdhci_readl(host, ESDHC_PROCTL);
    781	ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
    782	switch (width) {
    783	case MMC_BUS_WIDTH_8:
    784		ctrl |= ESDHC_CTRL_8BITBUS;
    785		break;
    786
    787	case MMC_BUS_WIDTH_4:
    788		ctrl |= ESDHC_CTRL_4BITBUS;
    789		break;
    790
    791	default:
    792		break;
    793	}
    794
    795	sdhci_writel(host, ctrl, ESDHC_PROCTL);
    796}
    797
    798static void esdhc_reset(struct sdhci_host *host, u8 mask)
    799{
    800	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
    801	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
    802	u32 val, bus_width = 0;
    803
    804	/*
    805	 * Add delay to make sure all the DMA transfers are finished
    806	 * for quirk.
    807	 */
    808	if (esdhc->quirk_delay_before_data_reset &&
    809	    (mask & SDHCI_RESET_DATA) &&
    810	    (host->flags & SDHCI_REQ_USE_DMA))
    811		mdelay(5);
    812
    813	/*
    814	 * Save bus-width for eSDHC whose vendor version is 2.2
    815	 * or lower for data reset.
    816	 */
    817	if ((mask & SDHCI_RESET_DATA) &&
    818	    (esdhc->vendor_ver <= VENDOR_V_22)) {
    819		val = sdhci_readl(host, ESDHC_PROCTL);
    820		bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
    821	}
    822
    823	sdhci_reset(host, mask);
    824
    825	/*
    826	 * Restore bus-width setting and interrupt registers for eSDHC
    827	 * whose vendor version is 2.2 or lower for data reset.
    828	 */
    829	if ((mask & SDHCI_RESET_DATA) &&
    830	    (esdhc->vendor_ver <= VENDOR_V_22)) {
    831		val = sdhci_readl(host, ESDHC_PROCTL);
    832		val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
    833		val |= bus_width;
    834		sdhci_writel(host, val, ESDHC_PROCTL);
    835
    836		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
    837		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
    838	}
    839
    840	/*
    841	 * Some bits have to be cleared manually for eSDHC whose spec
    842	 * version is 3.0 or higher, on a full reset (SDHCI_RESET_ALL).
    843	 */
    844	if ((mask & SDHCI_RESET_ALL) &&
    845	    (esdhc->spec_ver >= SDHCI_SPEC_300)) {
    846		val = sdhci_readl(host, ESDHC_TBCTL);
    847		val &= ~ESDHC_TB_EN;
    848		sdhci_writel(host, val, ESDHC_TBCTL);
    849
    850		/*
    851		 * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
    852		 * 0 for quirk.
    853		 */
    854		if (esdhc->quirk_unreliable_pulse_detection) {
    855			val = sdhci_readl(host, ESDHC_DLLCFG1);
    856			val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
    857			sdhci_writel(host, val, ESDHC_DLLCFG1);
    858		}
    859	}
    860}
    861
    862/* The SCFG, Supplemental Configuration Unit, provides SoC specific
    863 * configuration and status registers for the device. There is an
    864 * SDHC IO VSEL control register on SCFG for some platforms. It's
    865 * used to support SDHC IO voltage switching.
    866 */
    867static const struct of_device_id scfg_device_ids[] = {
    868	{ .compatible = "fsl,t1040-scfg", },
    869	{ .compatible = "fsl,ls1012a-scfg", },
    870	{ .compatible = "fsl,ls1046a-scfg", },
    871	{}
    872};
    873
    874/* SDHC IO VSEL control register definition */
    875#define SCFG_SDHCIOVSELCR	0x408
    876#define SDHCIOVSELCR_TGLEN	0x80000000
    877#define SDHCIOVSELCR_VSELVAL	0x60000000
    878#define SDHCIOVSELCR_SDHC_VS	0x00000001
    879
    880static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
    881				       struct mmc_ios *ios)
    882{
    883	struct sdhci_host *host = mmc_priv(mmc);
    884	struct device_node *scfg_node;
    885	void __iomem *scfg_base = NULL;
    886	u32 sdhciovselcr;
    887	u32 val;
    888
    889	/*
    890	 * Signal Voltage Switching is only applicable for Host Controllers
    891	 * v3.00 and above.
    892	 */
    893	if (host->version < SDHCI_SPEC_300)
    894		return 0;
    895
    896	val = sdhci_readl(host, ESDHC_PROCTL);
    897
    898	switch (ios->signal_voltage) {
    899	case MMC_SIGNAL_VOLTAGE_330:
    900		val &= ~ESDHC_VOLT_SEL;
    901		sdhci_writel(host, val, ESDHC_PROCTL);
    902		return 0;
    903	case MMC_SIGNAL_VOLTAGE_180:
    904		scfg_node = of_find_matching_node(NULL, scfg_device_ids);
    905		if (scfg_node)
    906			scfg_base = of_iomap(scfg_node, 0);
    907		if (scfg_base) {
    908			sdhciovselcr = SDHCIOVSELCR_TGLEN |
    909				       SDHCIOVSELCR_VSELVAL;
    910			iowrite32be(sdhciovselcr,
    911				scfg_base + SCFG_SDHCIOVSELCR);
    912
    913			val |= ESDHC_VOLT_SEL;
    914			sdhci_writel(host, val, ESDHC_PROCTL);
    915			mdelay(5);
    916
    917			sdhciovselcr = SDHCIOVSELCR_TGLEN |
    918				       SDHCIOVSELCR_SDHC_VS;
    919			iowrite32be(sdhciovselcr,
    920				scfg_base + SCFG_SDHCIOVSELCR);
    921			iounmap(scfg_base);
    922		} else {
    923			val |= ESDHC_VOLT_SEL;
    924			sdhci_writel(host, val, ESDHC_PROCTL);
    925		}
    926		return 0;
    927	default:
    928		return 0;
    929	}
    930}
    931
    932static struct soc_device_attribute soc_tuning_erratum_type1[] = {
    933	{ .family = "QorIQ T1023", },
    934	{ .family = "QorIQ T1040", },
    935	{ .family = "QorIQ T2080", },
    936	{ .family = "QorIQ LS1021A", },
    937	{ /* sentinel */ }
    938};
    939
    940static struct soc_device_attribute soc_tuning_erratum_type2[] = {
    941	{ .family = "QorIQ LS1012A", },
    942	{ .family = "QorIQ LS1043A", },
    943	{ .family = "QorIQ LS1046A", },
    944	{ .family = "QorIQ LS1080A", },
    945	{ .family = "QorIQ LS2080A", },
    946	{ .family = "QorIQ LA1575A", },
    947	{ /* sentinel */ }
    948};
    949
    950static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
    951{
    952	u32 val;
    953
    954	esdhc_clock_enable(host, false);
    955	esdhc_flush_async_fifo(host);
    956
    957	val = sdhci_readl(host, ESDHC_TBCTL);
    958	if (enable)
    959		val |= ESDHC_TB_EN;
    960	else
    961		val &= ~ESDHC_TB_EN;
    962	sdhci_writel(host, val, ESDHC_TBCTL);
    963
    964	esdhc_clock_enable(host, true);
    965}
    966
    967static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
    968				    u8 *window_end)
    969{
    970	u32 val;
    971
    972	/* Write TBCTL[11:8]=4'h8 */
    973	val = sdhci_readl(host, ESDHC_TBCTL);
    974	val &= ~(0xf << 8);
    975	val |= 8 << 8;
    976	sdhci_writel(host, val, ESDHC_TBCTL);
    977
    978	mdelay(1);
    979
    980	/* Read TBCTL[31:0] register and rewrite again */
    981	val = sdhci_readl(host, ESDHC_TBCTL);
    982	sdhci_writel(host, val, ESDHC_TBCTL);
    983
    984	mdelay(1);
    985
    986	/* Read the TBSTAT[31:0] register twice */
    987	val = sdhci_readl(host, ESDHC_TBSTAT);
    988	val = sdhci_readl(host, ESDHC_TBSTAT);
    989
    990	*window_end = val & 0xff;
    991	*window_start = (val >> 8) & 0xff;
    992}
    993
    994static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
    995				    u8 *window_end)
    996{
    997	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
    998	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
    999	u8 start_ptr, end_ptr;
   1000
   1001	if (esdhc->quirk_tuning_erratum_type1) {
   1002		*window_start = 5 * esdhc->div_ratio;
   1003		*window_end = 3 * esdhc->div_ratio;
   1004		return;
   1005	}
   1006
   1007	esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);
   1008
   1009	/* Reset data lines by setting ESDHCCTL[RSTD] */
   1010	sdhci_reset(host, SDHCI_RESET_DATA);
   1011	/* Write 32'hFFFF_FFFF to IRQSTAT register */
   1012	sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
   1013
   1014	/* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
   1015	 * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
   1016	 * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
   1017	 * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
   1018	 */
   1019
   1020	if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
   1021		*window_start = 8 * esdhc->div_ratio;
   1022		*window_end = 4 * esdhc->div_ratio;
   1023	} else {
   1024		*window_start = 5 * esdhc->div_ratio;
   1025		*window_end = 3 * esdhc->div_ratio;
   1026	}
   1027}
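
/*
 * Editor's note (illustrative, not part of the driver): a worked example
 * of the window selection above for a hypothetical div_ratio of 4. The
 * erratum threshold is 4 * 4 + 2 = 18 taps, so:
 *
 *   |start_ptr - end_ptr| > 18  ->  window_start = 8 * 4 = 32,
 *                                   window_end   = 4 * 4 = 16
 *   otherwise                   ->  window_start = 5 * 4 = 20,
 *                                   window_end   = 3 * 4 = 12
 *
 * Type1-erratum platforms skip the measurement entirely and always use
 * the 5x/3x window.
 */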
   1028
   1029static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
   1030				   u8 window_start, u8 window_end)
   1031{
   1032	struct sdhci_host *host = mmc_priv(mmc);
   1033	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
   1034	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
   1035	u32 val;
   1036	int ret;
   1037
   1038	/* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
   1039	val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
   1040	      ESDHC_WNDW_STRT_PTR_MASK;
   1041	val |= window_end & ESDHC_WNDW_END_PTR_MASK;
   1042	sdhci_writel(host, val, ESDHC_TBPTR);
   1043
   1044	/* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
   1045	val = sdhci_readl(host, ESDHC_TBCTL);
   1046	val &= ~ESDHC_TB_MODE_MASK;
   1047	val |= ESDHC_TB_MODE_SW;
   1048	sdhci_writel(host, val, ESDHC_TBCTL);
   1049
   1050	esdhc->in_sw_tuning = true;
   1051	ret = sdhci_execute_tuning(mmc, opcode);
   1052	esdhc->in_sw_tuning = false;
   1053	return ret;
   1054}
   1055
   1056static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
   1057{
   1058	struct sdhci_host *host = mmc_priv(mmc);
   1059	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
   1060	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
   1061	u8 window_start, window_end;
   1062	int ret, retries = 1;
   1063	bool hs400_tuning;
   1064	unsigned int clk;
   1065	u32 val;
   1066
   1067	/* For tuning mode, the SD clock divisor value
   1068	 * must be larger than 3 according to the reference manual.
   1069	 */
   1070	clk = esdhc->peripheral_clock / 3;
   1071	if (host->clock > clk)
   1072		esdhc_of_set_clock(host, clk);
   1073
   1074	esdhc_tuning_block_enable(host, true);
   1075
   1076	/*
   1077	 * The eSDHC controller takes the data timeout value into account
   1078	 * during tuning. If the SD card is too slow sending the response, the
   1079	 * timer will expire and a "Buffer Read Ready" interrupt without data
   1080	 * is triggered. This leads to tuning errors.
   1081	 *
   1082	 * Just set the timeout to the maximum value because the core will
   1083	 * already take care of it in sdhci_send_tuning().
   1084	 */
   1085	sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
   1086
   1087	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
   1088
   1089	do {
   1090		if (esdhc->quirk_limited_clk_division &&
   1091		    hs400_tuning)
   1092			esdhc_of_set_clock(host, host->clock);
   1093
   1094		/* Do HW tuning */
   1095		val = sdhci_readl(host, ESDHC_TBCTL);
   1096		val &= ~ESDHC_TB_MODE_MASK;
   1097		val |= ESDHC_TB_MODE_3;
   1098		sdhci_writel(host, val, ESDHC_TBCTL);
   1099
   1100		ret = sdhci_execute_tuning(mmc, opcode);
   1101		if (ret)
   1102			break;
   1103
   1104		/* On platforms affected by the type2 tuning erratum,
   1105		 * tuning may appear to succeed although eSDHC might not
   1106		 * have tuned properly, so check the tuning window.
   1107		 */
   1108		if (esdhc->quirk_tuning_erratum_type2 &&
   1109		    !host->tuning_err) {
   1110			esdhc_tuning_window_ptr(host, &window_start,
   1111						&window_end);
   1112			if (abs(window_start - window_end) >
   1113			    (4 * esdhc->div_ratio + 2))
   1114				host->tuning_err = -EAGAIN;
   1115		}
   1116
   1117		/* If HW tuning fails and triggers the erratum,
   1118		 * try the workaround.
   1119		 */
   1120		ret = host->tuning_err;
   1121		if (ret == -EAGAIN &&
   1122		    (esdhc->quirk_tuning_erratum_type1 ||
   1123		     esdhc->quirk_tuning_erratum_type2)) {
   1124			/* Recover HS400 tuning flag */
   1125			if (hs400_tuning)
   1126				host->flags |= SDHCI_HS400_TUNING;
   1127			pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
   1128				mmc_hostname(mmc));
   1129			/* Do SW tuning */
   1130			esdhc_prepare_sw_tuning(host, &window_start,
   1131						&window_end);
   1132			ret = esdhc_execute_sw_tuning(mmc, opcode,
   1133						      window_start,
   1134						      window_end);
   1135			if (ret)
   1136				break;
   1137
   1138			/* Retry both HW/SW tuning with reduced clock. */
   1139			ret = host->tuning_err;
   1140			if (ret == -EAGAIN && retries) {
   1141				/* Recover HS400 tuning flag */
   1142				if (hs400_tuning)
   1143					host->flags |= SDHCI_HS400_TUNING;
   1144
   1145				clk = host->max_clk / (esdhc->div_ratio + 1);
   1146				esdhc_of_set_clock(host, clk);
   1147				pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
   1148					mmc_hostname(mmc));
   1149			} else {
   1150				break;
   1151			}
   1152		} else {
   1153			break;
   1154		}
   1155	} while (retries--);
   1156
   1157	if (ret) {
   1158		esdhc_tuning_block_enable(host, false);
   1159	} else if (hs400_tuning) {
   1160		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
   1161		val |= ESDHC_FLW_CTL_BG;
   1162		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
   1163	}
   1164
   1165	return ret;
   1166}
   1167
   1168static void esdhc_set_uhs_signaling(struct sdhci_host *host,
   1169				   unsigned int timing)
   1170{
   1171	u32 val;
   1172
   1173	/*
   1174	 * There are specific registers setting for HS400 mode.
   1175	 * Clean all of them if controller is in HS400 mode to
   1176	 * exit HS400 mode before re-setting any speed mode.
   1177	 */
   1178	val = sdhci_readl(host, ESDHC_TBCTL);
   1179	if (val & ESDHC_HS400_MODE) {
   1180		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
   1181		val &= ~ESDHC_FLW_CTL_BG;
   1182		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
   1183
   1184		val = sdhci_readl(host, ESDHC_SDCLKCTL);
   1185		val &= ~ESDHC_CMD_CLK_CTL;
   1186		sdhci_writel(host, val, ESDHC_SDCLKCTL);
   1187
   1188		esdhc_clock_enable(host, false);
   1189		val = sdhci_readl(host, ESDHC_TBCTL);
   1190		val &= ~ESDHC_HS400_MODE;
   1191		sdhci_writel(host, val, ESDHC_TBCTL);
   1192		esdhc_clock_enable(host, true);
   1193
   1194		val = sdhci_readl(host, ESDHC_DLLCFG0);
   1195		val &= ~(ESDHC_DLL_ENABLE | ESDHC_DLL_FREQ_SEL);
   1196		sdhci_writel(host, val, ESDHC_DLLCFG0);
   1197
   1198		val = sdhci_readl(host, ESDHC_TBCTL);
   1199		val &= ~ESDHC_HS400_WNDW_ADJUST;
   1200		sdhci_writel(host, val, ESDHC_TBCTL);
   1201
   1202		esdhc_tuning_block_enable(host, false);
   1203	}
   1204
   1205	if (timing == MMC_TIMING_MMC_HS400)
   1206		esdhc_tuning_block_enable(host, true);
   1207	else
   1208		sdhci_set_uhs_signaling(host, timing);
   1209}
   1210
   1211static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
   1212{
   1213	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
   1214	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
   1215	u32 command;
   1216
   1217	if (esdhc->quirk_trans_complete_erratum) {
   1218		command = SDHCI_GET_CMD(sdhci_readw(host,
   1219					SDHCI_COMMAND));
   1220		if (command == MMC_WRITE_MULTIPLE_BLOCK &&
   1221				sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
   1222				intmask & SDHCI_INT_DATA_END) {
   1223			intmask &= ~SDHCI_INT_DATA_END;
   1224			sdhci_writel(host, SDHCI_INT_DATA_END,
   1225					SDHCI_INT_STATUS);
   1226		}
   1227	}
   1228	return intmask;
   1229}
   1230
   1231#ifdef CONFIG_PM_SLEEP
   1232static u32 esdhc_proctl;
   1233static int esdhc_of_suspend(struct device *dev)
   1234{
   1235	struct sdhci_host *host = dev_get_drvdata(dev);
   1236
   1237	esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);
   1238
   1239	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
   1240		mmc_retune_needed(host->mmc);
   1241
   1242	return sdhci_suspend_host(host);
   1243}
   1244
   1245static int esdhc_of_resume(struct device *dev)
   1246{
   1247	struct sdhci_host *host = dev_get_drvdata(dev);
   1248	int ret = sdhci_resume_host(host);
   1249
   1250	if (ret == 0) {
   1251		/* Isn't this already done by sdhci_resume_host() ? --rmk */
   1252		esdhc_of_enable_dma(host);
   1253		sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
   1254	}
   1255	return ret;
   1256}
   1257#endif
   1258
   1259static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
   1260			esdhc_of_suspend,
   1261			esdhc_of_resume);
   1262
   1263static const struct sdhci_ops sdhci_esdhc_be_ops = {
   1264	.read_l = esdhc_be_readl,
   1265	.read_w = esdhc_be_readw,
   1266	.read_b = esdhc_be_readb,
   1267	.write_l = esdhc_be_writel,
   1268	.write_w = esdhc_be_writew,
   1269	.write_b = esdhc_be_writeb,
   1270	.set_clock = esdhc_of_set_clock,
   1271	.enable_dma = esdhc_of_enable_dma,
   1272	.get_max_clock = esdhc_of_get_max_clock,
   1273	.get_min_clock = esdhc_of_get_min_clock,
   1274	.adma_workaround = esdhc_of_adma_workaround,
   1275	.set_bus_width = esdhc_pltfm_set_bus_width,
   1276	.reset = esdhc_reset,
   1277	.set_uhs_signaling = esdhc_set_uhs_signaling,
   1278	.irq = esdhc_irq,
   1279};
   1280
   1281static const struct sdhci_ops sdhci_esdhc_le_ops = {
   1282	.read_l = esdhc_le_readl,
   1283	.read_w = esdhc_le_readw,
   1284	.read_b = esdhc_le_readb,
   1285	.write_l = esdhc_le_writel,
   1286	.write_w = esdhc_le_writew,
   1287	.write_b = esdhc_le_writeb,
   1288	.set_clock = esdhc_of_set_clock,
   1289	.enable_dma = esdhc_of_enable_dma,
   1290	.get_max_clock = esdhc_of_get_max_clock,
   1291	.get_min_clock = esdhc_of_get_min_clock,
   1292	.adma_workaround = esdhc_of_adma_workaround,
   1293	.set_bus_width = esdhc_pltfm_set_bus_width,
   1294	.reset = esdhc_reset,
   1295	.set_uhs_signaling = esdhc_set_uhs_signaling,
   1296	.irq = esdhc_irq,
   1297};
   1298
   1299static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
   1300	.quirks = ESDHC_DEFAULT_QUIRKS |
   1301#ifdef CONFIG_PPC
   1302		  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
   1303#endif
   1304		  SDHCI_QUIRK_NO_CARD_NO_RESET |
   1305		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
   1306	.ops = &sdhci_esdhc_be_ops,
   1307};
   1308
   1309static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
   1310	.quirks = ESDHC_DEFAULT_QUIRKS |
   1311		  SDHCI_QUIRK_NO_CARD_NO_RESET |
   1312		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
   1313	.ops = &sdhci_esdhc_le_ops,
   1314};
   1315
   1316static struct soc_device_attribute soc_incorrect_hostver[] = {
   1317	{ .family = "QorIQ T4240", .revision = "1.0", },
   1318	{ .family = "QorIQ T4240", .revision = "2.0", },
   1319	{ /* sentinel */ }
   1320};
   1321
   1322static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
   1323	{ .family = "QorIQ LX2160A", .revision = "1.0", },
   1324	{ .family = "QorIQ LX2160A", .revision = "2.0", },
   1325	{ .family = "QorIQ LS1028A", .revision = "1.0", },
   1326	{ /* sentinel */ }
   1327};
   1328
   1329static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
   1330	{ .family = "QorIQ LX2160A", .revision = "1.0", },
   1331	{ .family = "QorIQ LX2160A", .revision = "2.0", },
   1332	{ .family = "QorIQ LS1028A", .revision = "1.0", },
   1333	{ /* sentinel */ }
   1334};
   1335
   1336static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
   1337{
   1338	const struct of_device_id *match;
   1339	struct sdhci_pltfm_host *pltfm_host;
   1340	struct sdhci_esdhc *esdhc;
   1341	struct device_node *np;
   1342	struct clk *clk;
   1343	u32 val;
   1344	u16 host_ver;
   1345
   1346	pltfm_host = sdhci_priv(host);
   1347	esdhc = sdhci_pltfm_priv(pltfm_host);
   1348
   1349	host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
   1350	esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
   1351			     SDHCI_VENDOR_VER_SHIFT;
   1352	esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
   1353	if (soc_device_match(soc_incorrect_hostver))
   1354		esdhc->quirk_incorrect_hostver = true;
   1355	else
   1356		esdhc->quirk_incorrect_hostver = false;
   1357
   1358	if (soc_device_match(soc_fixup_sdhc_clkdivs))
   1359		esdhc->quirk_limited_clk_division = true;
   1360	else
   1361		esdhc->quirk_limited_clk_division = false;
   1362
   1363	if (soc_device_match(soc_unreliable_pulse_detection))
   1364		esdhc->quirk_unreliable_pulse_detection = true;
   1365	else
   1366		esdhc->quirk_unreliable_pulse_detection = false;
   1367
   1368	match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
   1369	if (match)
   1370		esdhc->clk_fixup = match->data;
   1371	np = pdev->dev.of_node;
   1372
   1373	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
   1374		esdhc->quirk_delay_before_data_reset = true;
   1375		esdhc->quirk_trans_complete_erratum = true;
   1376	}
   1377
   1378	clk = of_clk_get(np, 0);
   1379	if (!IS_ERR(clk)) {
   1380		/*
   1381		 * esdhc->peripheral_clock is assigned the eSDHC base clock
   1382		 * when the peripheral clock is used. For some platforms, the
   1383		 * clock value obtained from the common clk API is the
   1384		 * peripheral clock, while the eSDHC base clock is 1/2 the
   1385		 * peripheral clock.
   1386		 */
   1387		if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
   1388		    of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
   1389		    of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
   1390			esdhc->peripheral_clock = clk_get_rate(clk) / 2;
   1391		else
   1392			esdhc->peripheral_clock = clk_get_rate(clk);
   1393
   1394		clk_put(clk);
   1395	}
   1396
   1397	esdhc_clock_enable(host, false);
   1398	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
   1399	/*
   1400	 * This bit cannot be reset by SDHCI_RESET_ALL. Initialize it to
   1401	 * 1 or 0 once, to override whatever value may have been
   1402	 * configured by the bootloader.
   1403	 */
   1404	if (esdhc->peripheral_clock)
   1405		val |= ESDHC_PERIPHERAL_CLK_SEL;
   1406	else
   1407		val &= ~ESDHC_PERIPHERAL_CLK_SEL;
   1408	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
   1409	esdhc_clock_enable(host, true);
   1410}
   1411
   1412static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
   1413{
   1414	esdhc_tuning_block_enable(mmc_priv(mmc), false);
   1415	return 0;
   1416}
   1417
   1418static int sdhci_esdhc_probe(struct platform_device *pdev)
   1419{
   1420	struct sdhci_host *host;
   1421	struct device_node *np;
   1422	struct sdhci_pltfm_host *pltfm_host;
   1423	struct sdhci_esdhc *esdhc;
   1424	int ret;
   1425
   1426	np = pdev->dev.of_node;
   1427
   1428	if (of_property_read_bool(np, "little-endian"))
   1429		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
   1430					sizeof(struct sdhci_esdhc));
   1431	else
   1432		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
   1433					sizeof(struct sdhci_esdhc));
   1434
   1435	if (IS_ERR(host))
   1436		return PTR_ERR(host);
   1437
   1438	host->mmc_host_ops.start_signal_voltage_switch =
   1439		esdhc_signal_voltage_switch;
   1440	host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
   1441	host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
   1442	host->tuning_delay = 1;
   1443
   1444	esdhc_init(pdev, host);
   1445
   1446	sdhci_get_of_property(pdev);
   1447
   1448	pltfm_host = sdhci_priv(host);
   1449	esdhc = sdhci_pltfm_priv(pltfm_host);
   1450	if (soc_device_match(soc_tuning_erratum_type1))
   1451		esdhc->quirk_tuning_erratum_type1 = true;
   1452	else
   1453		esdhc->quirk_tuning_erratum_type1 = false;
   1454
   1455	if (soc_device_match(soc_tuning_erratum_type2))
   1456		esdhc->quirk_tuning_erratum_type2 = true;
   1457	else
   1458		esdhc->quirk_tuning_erratum_type2 = false;
   1459
   1460	if (esdhc->vendor_ver == VENDOR_V_22)
   1461		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
   1462
   1463	if (esdhc->vendor_ver > VENDOR_V_22)
   1464		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
   1465
   1466	if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
   1467		host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
   1468		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
   1469	}
   1470
   1471	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
   1472	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
   1473	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
   1474	    of_device_is_compatible(np, "fsl,p1020-esdhc") ||
   1475	    of_device_is_compatible(np, "fsl,t1040-esdhc"))
   1476		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
   1477
   1478	if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
   1479		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
   1480
   1481	esdhc->quirk_ignore_data_inhibit = false;
   1482	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
   1483		/*
   1484		 * Freescale messed up with P2020 as it has a non-standard
   1485		 * host control register
   1486		 */
   1487		host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
   1488		esdhc->quirk_ignore_data_inhibit = true;
   1489	}
   1490
   1491	/* call to generic mmc_of_parse to support additional capabilities */
   1492	ret = mmc_of_parse(host->mmc);
   1493	if (ret)
   1494		goto err;
   1495
   1496	mmc_of_parse_voltage(host->mmc, &host->ocr_mask);
   1497
   1498	ret = sdhci_add_host(host);
   1499	if (ret)
   1500		goto err;
   1501
   1502	return 0;
   1503 err:
   1504	sdhci_pltfm_free(pdev);
   1505	return ret;
   1506}
   1507
   1508static struct platform_driver sdhci_esdhc_driver = {
   1509	.driver = {
   1510		.name = "sdhci-esdhc",
   1511		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
   1512		.of_match_table = sdhci_esdhc_of_match,
   1513		.pm = &esdhc_of_dev_pm_ops,
   1514	},
   1515	.probe = sdhci_esdhc_probe,
   1516	.remove = sdhci_pltfm_unregister,
   1517};
   1518
   1519module_platform_driver(sdhci_esdhc_driver);
   1520
   1521MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
   1522MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
   1523	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
   1524MODULE_LICENSE("GPL v2");