cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

atmel-mci.c (75538B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Atmel MultiMedia Card Interface driver
      4 *
      5 * Copyright (C) 2004-2008 Atmel Corporation
      6 */
      7#include <linux/blkdev.h>
      8#include <linux/clk.h>
      9#include <linux/debugfs.h>
     10#include <linux/device.h>
     11#include <linux/dmaengine.h>
     12#include <linux/dma-mapping.h>
     13#include <linux/err.h>
     14#include <linux/gpio.h>
     15#include <linux/init.h>
     16#include <linux/interrupt.h>
     17#include <linux/io.h>
     18#include <linux/ioport.h>
     19#include <linux/module.h>
     20#include <linux/of.h>
     21#include <linux/of_device.h>
     22#include <linux/of_gpio.h>
     23#include <linux/platform_device.h>
     24#include <linux/scatterlist.h>
     25#include <linux/seq_file.h>
     26#include <linux/slab.h>
     27#include <linux/stat.h>
     28#include <linux/types.h>
     29
     30#include <linux/mmc/host.h>
     31#include <linux/mmc/sdio.h>
     32
     33#include <linux/atmel-mci.h>
     34#include <linux/atmel_pdc.h>
     35#include <linux/pm.h>
     36#include <linux/pm_runtime.h>
     37#include <linux/pinctrl/consumer.h>
     38
     39#include <asm/cacheflush.h>
     40#include <asm/io.h>
     41#include <asm/unaligned.h>
     42
     43/*
     44 * Superset of MCI IP registers integrated in Atmel AT91 Processor
     45 * Registers and bitfields marked with [2] are only available in MCI2
     46 */
     47
     48/* MCI Register Definitions */
     49#define	ATMCI_CR			0x0000	/* Control */
     50#define		ATMCI_CR_MCIEN			BIT(0)		/* MCI Enable */
     51#define		ATMCI_CR_MCIDIS			BIT(1)		/* MCI Disable */
     52#define		ATMCI_CR_PWSEN			BIT(2)		/* Power Save Enable */
     53#define		ATMCI_CR_PWSDIS			BIT(3)		/* Power Save Disable */
     54#define		ATMCI_CR_SWRST			BIT(7)		/* Software Reset */
     55#define	ATMCI_MR			0x0004	/* Mode */
     56#define		ATMCI_MR_CLKDIV(x)		((x) <<  0)	/* Clock Divider */
     57#define		ATMCI_MR_PWSDIV(x)		((x) <<  8)	/* Power Saving Divider */
     58#define		ATMCI_MR_RDPROOF		BIT(11)		/* Read Proof */
     59#define		ATMCI_MR_WRPROOF		BIT(12)		/* Write Proof */
     60#define		ATMCI_MR_PDCFBYTE		BIT(13)		/* Force Byte Transfer */
     61#define		ATMCI_MR_PDCPADV		BIT(14)		/* Padding Value */
     62#define		ATMCI_MR_PDCMODE		BIT(15)		/* PDC-oriented Mode */
     63#define		ATMCI_MR_CLKODD(x)		((x) << 16)	/* LSB of Clock Divider */
     64#define	ATMCI_DTOR			0x0008	/* Data Timeout */
     65#define		ATMCI_DTOCYC(x)			((x) <<  0)	/* Data Timeout Cycles */
     66#define		ATMCI_DTOMUL(x)			((x) <<  4)	/* Data Timeout Multiplier */
     67#define	ATMCI_SDCR			0x000c	/* SD Card / SDIO */
     68#define		ATMCI_SDCSEL_SLOT_A		(0 <<  0)	/* Select SD slot A */
      69#define		ATMCI_SDCSEL_SLOT_B		(1 <<  0)	/* Select SD slot B */
     70#define		ATMCI_SDCSEL_MASK		(3 <<  0)
     71#define		ATMCI_SDCBUS_1BIT		(0 <<  6)	/* 1-bit data bus */
     72#define		ATMCI_SDCBUS_4BIT		(2 <<  6)	/* 4-bit data bus */
     73#define		ATMCI_SDCBUS_8BIT		(3 <<  6)	/* 8-bit data bus[2] */
     74#define		ATMCI_SDCBUS_MASK		(3 <<  6)
     75#define	ATMCI_ARGR			0x0010	/* Command Argument */
     76#define	ATMCI_CMDR			0x0014	/* Command */
     77#define		ATMCI_CMDR_CMDNB(x)		((x) <<  0)	/* Command Opcode */
     78#define		ATMCI_CMDR_RSPTYP_NONE		(0 <<  6)	/* No response */
     79#define		ATMCI_CMDR_RSPTYP_48BIT		(1 <<  6)	/* 48-bit response */
     80#define		ATMCI_CMDR_RSPTYP_136BIT	(2 <<  6)	/* 136-bit response */
     81#define		ATMCI_CMDR_SPCMD_INIT		(1 <<  8)	/* Initialization command */
     82#define		ATMCI_CMDR_SPCMD_SYNC		(2 <<  8)	/* Synchronized command */
     83#define		ATMCI_CMDR_SPCMD_INT		(4 <<  8)	/* Interrupt command */
     84#define		ATMCI_CMDR_SPCMD_INTRESP	(5 <<  8)	/* Interrupt response */
     85#define		ATMCI_CMDR_OPDCMD		(1 << 11)	/* Open Drain */
     86#define		ATMCI_CMDR_MAXLAT_5CYC		(0 << 12)	/* Max latency 5 cycles */
     87#define		ATMCI_CMDR_MAXLAT_64CYC		(1 << 12)	/* Max latency 64 cycles */
     88#define		ATMCI_CMDR_START_XFER		(1 << 16)	/* Start data transfer */
     89#define		ATMCI_CMDR_STOP_XFER		(2 << 16)	/* Stop data transfer */
     90#define		ATMCI_CMDR_TRDIR_WRITE		(0 << 18)	/* Write data */
     91#define		ATMCI_CMDR_TRDIR_READ		(1 << 18)	/* Read data */
     92#define		ATMCI_CMDR_BLOCK		(0 << 19)	/* Single-block transfer */
     93#define		ATMCI_CMDR_MULTI_BLOCK		(1 << 19)	/* Multi-block transfer */
     94#define		ATMCI_CMDR_STREAM		(2 << 19)	/* MMC Stream transfer */
     95#define		ATMCI_CMDR_SDIO_BYTE		(4 << 19)	/* SDIO Byte transfer */
     96#define		ATMCI_CMDR_SDIO_BLOCK		(5 << 19)	/* SDIO Block transfer */
     97#define		ATMCI_CMDR_SDIO_SUSPEND		(1 << 24)	/* SDIO Suspend Command */
     98#define		ATMCI_CMDR_SDIO_RESUME		(2 << 24)	/* SDIO Resume Command */
     99#define	ATMCI_BLKR			0x0018	/* Block */
    100#define		ATMCI_BCNT(x)			((x) <<  0)	/* Data Block Count */
    101#define		ATMCI_BLKLEN(x)			((x) << 16)	/* Data Block Length */
    102#define	ATMCI_CSTOR			0x001c	/* Completion Signal Timeout[2] */
    103#define		ATMCI_CSTOCYC(x)		((x) <<  0)	/* CST cycles */
    104#define		ATMCI_CSTOMUL(x)		((x) <<  4)	/* CST multiplier */
    105#define	ATMCI_RSPR			0x0020	/* Response 0 */
    106#define	ATMCI_RSPR1			0x0024	/* Response 1 */
    107#define	ATMCI_RSPR2			0x0028	/* Response 2 */
    108#define	ATMCI_RSPR3			0x002c	/* Response 3 */
    109#define	ATMCI_RDR			0x0030	/* Receive Data */
    110#define	ATMCI_TDR			0x0034	/* Transmit Data */
    111#define	ATMCI_SR			0x0040	/* Status */
    112#define	ATMCI_IER			0x0044	/* Interrupt Enable */
    113#define	ATMCI_IDR			0x0048	/* Interrupt Disable */
    114#define	ATMCI_IMR			0x004c	/* Interrupt Mask */
    115#define		ATMCI_CMDRDY			BIT(0)		/* Command Ready */
    116#define		ATMCI_RXRDY			BIT(1)		/* Receiver Ready */
    117#define		ATMCI_TXRDY			BIT(2)		/* Transmitter Ready */
    118#define		ATMCI_BLKE			BIT(3)		/* Data Block Ended */
    119#define		ATMCI_DTIP			BIT(4)		/* Data Transfer In Progress */
    120#define		ATMCI_NOTBUSY			BIT(5)		/* Data Not Busy */
    121#define		ATMCI_ENDRX			BIT(6)		/* End of RX Buffer */
    122#define		ATMCI_ENDTX			BIT(7)		/* End of TX Buffer */
    123#define		ATMCI_SDIOIRQA			BIT(8)		/* SDIO IRQ in slot A */
    124#define		ATMCI_SDIOIRQB			BIT(9)		/* SDIO IRQ in slot B */
    125#define		ATMCI_SDIOWAIT			BIT(12)		/* SDIO Read Wait Operation Status */
    126#define		ATMCI_CSRCV			BIT(13)		/* CE-ATA Completion Signal Received */
    127#define		ATMCI_RXBUFF			BIT(14)		/* RX Buffer Full */
    128#define		ATMCI_TXBUFE			BIT(15)		/* TX Buffer Empty */
    129#define		ATMCI_RINDE			BIT(16)		/* Response Index Error */
    130#define		ATMCI_RDIRE			BIT(17)		/* Response Direction Error */
    131#define		ATMCI_RCRCE			BIT(18)		/* Response CRC Error */
    132#define		ATMCI_RENDE			BIT(19)		/* Response End Bit Error */
    133#define		ATMCI_RTOE			BIT(20)		/* Response Time-Out Error */
    134#define		ATMCI_DCRCE			BIT(21)		/* Data CRC Error */
    135#define		ATMCI_DTOE			BIT(22)		/* Data Time-Out Error */
    136#define		ATMCI_CSTOE			BIT(23)		/* Completion Signal Time-out Error */
    137#define		ATMCI_BLKOVRE			BIT(24)		/* DMA Block Overrun Error */
    138#define		ATMCI_DMADONE			BIT(25)		/* DMA Transfer Done */
    139#define		ATMCI_FIFOEMPTY			BIT(26)		/* FIFO Empty Flag */
    140#define		ATMCI_XFRDONE			BIT(27)		/* Transfer Done Flag */
    141#define		ATMCI_ACKRCV			BIT(28)		/* Boot Operation Acknowledge Received */
    142#define		ATMCI_ACKRCVE			BIT(29)		/* Boot Operation Acknowledge Error */
    143#define		ATMCI_OVRE			BIT(30)		/* RX Overrun Error */
    144#define		ATMCI_UNRE			BIT(31)		/* TX Underrun Error */
    145#define	ATMCI_DMA			0x0050	/* DMA Configuration[2] */
    146#define		ATMCI_DMA_OFFSET(x)		((x) <<  0)	/* DMA Write Buffer Offset */
    147#define		ATMCI_DMA_CHKSIZE(x)		((x) <<  4)	/* DMA Channel Read and Write Chunk Size */
    148#define		ATMCI_DMAEN			BIT(8)	/* DMA Hardware Handshaking Enable */
    149#define	ATMCI_CFG			0x0054	/* Configuration[2] */
    150#define		ATMCI_CFG_FIFOMODE_1DATA	BIT(0)		/* MCI Internal FIFO control mode */
    151#define		ATMCI_CFG_FERRCTRL_COR		BIT(4)		/* Flow Error flag reset control mode */
    152#define		ATMCI_CFG_HSMODE		BIT(8)		/* High Speed Mode */
    153#define		ATMCI_CFG_LSYNC			BIT(12)		/* Synchronize on the last block */
    154#define	ATMCI_WPMR			0x00e4	/* Write Protection Mode[2] */
    155#define		ATMCI_WP_EN			BIT(0)		/* WP Enable */
    156#define		ATMCI_WP_KEY			(0x4d4349 << 8)	/* WP Key */
    157#define	ATMCI_WPSR			0x00e8	/* Write Protection Status[2] */
    158#define		ATMCI_GET_WP_VS(x)		((x) & 0x0f)
    159#define		ATMCI_GET_WP_VSRC(x)		(((x) >> 8) & 0xffff)
    160#define	ATMCI_VERSION			0x00FC  /* Version */
    161#define	ATMCI_FIFO_APERTURE		0x0200	/* FIFO Aperture[2] */
    162
     163/* This does not include the FIFO Aperture on MCI2 */
    164#define	ATMCI_REGS_SIZE		0x100
    165
    166/* Register access macros */
    167#define	atmci_readl(port, reg)				\
    168	__raw_readl((port)->regs + reg)
    169#define	atmci_writel(port, reg, value)			\
    170	__raw_writel((value), (port)->regs + reg)
    171
    172#define ATMCI_CMD_TIMEOUT_MS	2000
    173#define AUTOSUSPEND_DELAY	50
    174
    175#define ATMCI_DATA_ERROR_FLAGS	(ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
    176#define ATMCI_DMA_THRESHOLD	16
    177
    178enum {
    179	EVENT_CMD_RDY = 0,
    180	EVENT_XFER_COMPLETE,
    181	EVENT_NOTBUSY,
    182	EVENT_DATA_ERROR,
    183};
    184
    185enum atmel_mci_state {
    186	STATE_IDLE = 0,
    187	STATE_SENDING_CMD,
    188	STATE_DATA_XFER,
    189	STATE_WAITING_NOTBUSY,
    190	STATE_SENDING_STOP,
    191	STATE_END_REQUEST,
    192};
    193
    194enum atmci_xfer_dir {
    195	XFER_RECEIVE = 0,
    196	XFER_TRANSMIT,
    197};
    198
    199enum atmci_pdc_buf {
    200	PDC_FIRST_BUF = 0,
    201	PDC_SECOND_BUF,
    202};
    203
    204struct atmel_mci_caps {
    205	bool    has_dma_conf_reg;
    206	bool    has_pdc;
    207	bool    has_cfg_reg;
    208	bool    has_cstor_reg;
    209	bool    has_highspeed;
    210	bool    has_rwproof;
    211	bool	has_odd_clk_div;
    212	bool	has_bad_data_ordering;
    213	bool	need_reset_after_xfer;
    214	bool	need_blksz_mul_4;
    215	bool	need_notbusy_for_read_ops;
    216};
    217
    218struct atmel_mci_dma {
    219	struct dma_chan			*chan;
    220	struct dma_async_tx_descriptor	*data_desc;
    221};
    222
    223/**
    224 * struct atmel_mci - MMC controller state shared between all slots
    225 * @lock: Spinlock protecting the queue and associated data.
    226 * @regs: Pointer to MMIO registers.
    227 * @sg: Scatterlist entry currently being processed by PIO or PDC code.
    228 * @sg_len: Size of the scatterlist
    229 * @pio_offset: Offset into the current scatterlist entry.
    230 * @buffer: Buffer used if we don't have the r/w proof capability. We
    231 *      don't have the time to switch pdc buffers so we have to use only
    232 *      one buffer for the full transaction.
    233 * @buf_size: size of the buffer.
    234 * @buf_phys_addr: buffer address needed for pdc.
    235 * @cur_slot: The slot which is currently using the controller.
    236 * @mrq: The request currently being processed on @cur_slot,
    237 *	or NULL if the controller is idle.
    238 * @cmd: The command currently being sent to the card, or NULL.
    239 * @data: The data currently being transferred, or NULL if no data
    240 *	transfer is in progress.
    241 * @data_size: just data->blocks * data->blksz.
    242 * @dma: DMA client state.
    243 * @data_chan: DMA channel being used for the current data transfer.
    244 * @dma_conf: Configuration for the DMA slave
    245 * @cmd_status: Snapshot of SR taken upon completion of the current
    246 *	command. Only valid when EVENT_CMD_COMPLETE is pending.
    247 * @data_status: Snapshot of SR taken upon completion of the current
    248 *	data transfer. Only valid when EVENT_DATA_COMPLETE or
    249 *	EVENT_DATA_ERROR is pending.
    250 * @stop_cmdr: Value to be loaded into CMDR when the stop command is
    251 *	to be sent.
    252 * @tasklet: Tasklet running the request state machine.
    253 * @pending_events: Bitmask of events flagged by the interrupt handler
    254 *	to be processed by the tasklet.
    255 * @completed_events: Bitmask of events which the state machine has
    256 *	processed.
    257 * @state: Tasklet state.
    258 * @queue: List of slots waiting for access to the controller.
    259 * @need_clock_update: Update the clock rate before the next request.
    260 * @need_reset: Reset controller before next request.
     261 * @timer: Software timeout timer, used when the hardware data timeout error flag cannot rise.
    262 * @mode_reg: Value of the MR register.
    263 * @cfg_reg: Value of the CFG register.
    264 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
    265 *	rate and timeout calculations.
    266 * @mapbase: Physical address of the MMIO registers.
    267 * @mck: The peripheral bus clock hooked up to the MMC controller.
    268 * @pdev: Platform device associated with the MMC controller.
    269 * @slot: Slots sharing this MMC controller.
    270 * @caps: MCI capabilities depending on MCI version.
    271 * @prepare_data: function to setup MCI before data transfer which
    272 * depends on MCI capabilities.
    273 * @submit_data: function to start data transfer which depends on MCI
    274 * capabilities.
    275 * @stop_transfer: function to stop data transfer which depends on MCI
    276 * capabilities.
    277 *
    278 * Locking
    279 * =======
    280 *
    281 * @lock is a softirq-safe spinlock protecting @queue as well as
    282 * @cur_slot, @mrq and @state. These must always be updated
    283 * at the same time while holding @lock.
    284 *
    285 * @lock also protects mode_reg and need_clock_update since these are
    286 * used to synchronize mode register updates with the queue
    287 * processing.
    288 *
    289 * The @mrq field of struct atmel_mci_slot is also protected by @lock,
    290 * and must always be written at the same time as the slot is added to
    291 * @queue.
    292 *
    293 * @pending_events and @completed_events are accessed using atomic bit
    294 * operations, so they don't need any locking.
    295 *
    296 * None of the fields touched by the interrupt handler need any
    297 * locking. However, ordering is important: Before EVENT_DATA_ERROR or
    298 * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
    299 * interrupts must be disabled and @data_status updated with a
    300 * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
    301 * CMDRDY interrupt must be disabled and @cmd_status updated with a
    302 * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
    303 * bytes_xfered field of @data must be written. This is ensured by
    304 * using barriers.
    305 */
    306struct atmel_mci {
    307	spinlock_t		lock;
    308	void __iomem		*regs;
    309
    310	struct scatterlist	*sg;
    311	unsigned int		sg_len;
    312	unsigned int		pio_offset;
    313	unsigned int		*buffer;
    314	unsigned int		buf_size;
    315	dma_addr_t		buf_phys_addr;
    316
    317	struct atmel_mci_slot	*cur_slot;
    318	struct mmc_request	*mrq;
    319	struct mmc_command	*cmd;
    320	struct mmc_data		*data;
    321	unsigned int		data_size;
    322
    323	struct atmel_mci_dma	dma;
    324	struct dma_chan		*data_chan;
    325	struct dma_slave_config	dma_conf;
    326
    327	u32			cmd_status;
    328	u32			data_status;
    329	u32			stop_cmdr;
    330
    331	struct tasklet_struct	tasklet;
    332	unsigned long		pending_events;
    333	unsigned long		completed_events;
    334	enum atmel_mci_state	state;
    335	struct list_head	queue;
    336
    337	bool			need_clock_update;
    338	bool			need_reset;
    339	struct timer_list	timer;
    340	u32			mode_reg;
    341	u32			cfg_reg;
    342	unsigned long		bus_hz;
    343	unsigned long		mapbase;
    344	struct clk		*mck;
    345	struct platform_device	*pdev;
    346
    347	struct atmel_mci_slot	*slot[ATMCI_MAX_NR_SLOTS];
    348
    349	struct atmel_mci_caps   caps;
    350
    351	u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
    352	void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
    353	void (*stop_transfer)(struct atmel_mci *host);
    354};
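
/*
 * Illustrative sketch (not part of the driver) of the locking rule
 * documented above: @queue, @cur_slot, @mrq and @state are only updated
 * together while holding @lock, e.g.
 *
 *	spin_lock_bh(&host->lock);
 *	slot->mrq = mrq;
 *	if (host->state == STATE_IDLE) {
 *		host->state = STATE_SENDING_CMD;
 *		atmci_start_request(host, slot);
 *	} else {
 *		list_add_tail(&slot->queue_node, &host->queue);
 *	}
 *	spin_unlock_bh(&host->lock);
 *
 * which is exactly the pattern atmci_queue_request() follows further down.
 */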
    355
    356/**
    357 * struct atmel_mci_slot - MMC slot state
    358 * @mmc: The mmc_host representing this slot.
    359 * @host: The MMC controller this slot is using.
    360 * @sdc_reg: Value of SDCR to be written before using this slot.
    361 * @sdio_irq: SDIO irq mask for this slot.
    362 * @mrq: mmc_request currently being processed or waiting to be
    363 *	processed, or NULL when the slot is idle.
    364 * @queue_node: List node for placing this node in the @queue list of
    365 *	&struct atmel_mci.
    366 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
     367 * @flags: Miscellaneous state bits associated with the slot.
    368 * @detect_pin: GPIO pin used for card detection, or negative if not
    369 *	available.
     370 * @wp_pin: GPIO pin used for card write protect sensing, or negative
    371 *	if not available.
    372 * @detect_is_active_high: The state of the detect pin when it is active.
    373 * @detect_timer: Timer used for debouncing @detect_pin interrupts.
    374 */
    375struct atmel_mci_slot {
    376	struct mmc_host		*mmc;
    377	struct atmel_mci	*host;
    378
    379	u32			sdc_reg;
    380	u32			sdio_irq;
    381
    382	struct mmc_request	*mrq;
    383	struct list_head	queue_node;
    384
    385	unsigned int		clock;
    386	unsigned long		flags;
    387#define ATMCI_CARD_PRESENT	0
    388#define ATMCI_CARD_NEED_INIT	1
    389#define ATMCI_SHUTDOWN		2
    390
    391	int			detect_pin;
    392	int			wp_pin;
    393	bool			detect_is_active_high;
    394
    395	struct timer_list	detect_timer;
    396};
    397
    398#define atmci_test_and_clear_pending(host, event)		\
    399	test_and_clear_bit(event, &host->pending_events)
    400#define atmci_set_completed(host, event)			\
    401	set_bit(event, &host->completed_events)
    402#define atmci_set_pending(host, event)				\
    403	set_bit(event, &host->pending_events)
    404
    405/*
    406 * The debugfs stuff below is mostly optimized away when
    407 * CONFIG_DEBUG_FS is not set.
    408 */
    409static int atmci_req_show(struct seq_file *s, void *v)
    410{
    411	struct atmel_mci_slot	*slot = s->private;
    412	struct mmc_request	*mrq;
    413	struct mmc_command	*cmd;
    414	struct mmc_command	*stop;
    415	struct mmc_data		*data;
    416
    417	/* Make sure we get a consistent snapshot */
    418	spin_lock_bh(&slot->host->lock);
    419	mrq = slot->mrq;
    420
    421	if (mrq) {
    422		cmd = mrq->cmd;
    423		data = mrq->data;
    424		stop = mrq->stop;
    425
    426		if (cmd)
    427			seq_printf(s,
    428				"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
    429				cmd->opcode, cmd->arg, cmd->flags,
    430				cmd->resp[0], cmd->resp[1], cmd->resp[2],
    431				cmd->resp[3], cmd->error);
    432		if (data)
    433			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
    434				data->bytes_xfered, data->blocks,
    435				data->blksz, data->flags, data->error);
    436		if (stop)
    437			seq_printf(s,
    438				"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
    439				stop->opcode, stop->arg, stop->flags,
    440				stop->resp[0], stop->resp[1], stop->resp[2],
    441				stop->resp[3], stop->error);
    442	}
    443
    444	spin_unlock_bh(&slot->host->lock);
    445
    446	return 0;
    447}
    448
    449DEFINE_SHOW_ATTRIBUTE(atmci_req);
    450
    451static void atmci_show_status_reg(struct seq_file *s,
    452		const char *regname, u32 value)
    453{
    454	static const char	*sr_bit[] = {
    455		[0]	= "CMDRDY",
    456		[1]	= "RXRDY",
    457		[2]	= "TXRDY",
    458		[3]	= "BLKE",
    459		[4]	= "DTIP",
    460		[5]	= "NOTBUSY",
    461		[6]	= "ENDRX",
    462		[7]	= "ENDTX",
    463		[8]	= "SDIOIRQA",
    464		[9]	= "SDIOIRQB",
    465		[12]	= "SDIOWAIT",
    466		[14]	= "RXBUFF",
    467		[15]	= "TXBUFE",
    468		[16]	= "RINDE",
    469		[17]	= "RDIRE",
    470		[18]	= "RCRCE",
    471		[19]	= "RENDE",
    472		[20]	= "RTOE",
    473		[21]	= "DCRCE",
    474		[22]	= "DTOE",
    475		[23]	= "CSTOE",
    476		[24]	= "BLKOVRE",
    477		[25]	= "DMADONE",
    478		[26]	= "FIFOEMPTY",
    479		[27]	= "XFRDONE",
    480		[30]	= "OVRE",
    481		[31]	= "UNRE",
    482	};
    483	unsigned int		i;
    484
    485	seq_printf(s, "%s:\t0x%08x", regname, value);
    486	for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
    487		if (value & (1 << i)) {
    488			if (sr_bit[i])
    489				seq_printf(s, " %s", sr_bit[i]);
    490			else
    491				seq_puts(s, " UNKNOWN");
    492		}
    493	}
    494	seq_putc(s, '\n');
    495}
    496
    497static int atmci_regs_show(struct seq_file *s, void *v)
    498{
    499	struct atmel_mci	*host = s->private;
    500	u32			*buf;
    501	int			ret = 0;
    502
    503
    504	buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
    505	if (!buf)
    506		return -ENOMEM;
    507
    508	pm_runtime_get_sync(&host->pdev->dev);
    509
    510	/*
    511	 * Grab a more or less consistent snapshot. Note that we're
    512	 * not disabling interrupts, so IMR and SR may not be
    513	 * consistent.
    514	 */
    515	spin_lock_bh(&host->lock);
    516	memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
    517	spin_unlock_bh(&host->lock);
    518
    519	pm_runtime_mark_last_busy(&host->pdev->dev);
    520	pm_runtime_put_autosuspend(&host->pdev->dev);
    521
    522	seq_printf(s, "MR:\t0x%08x%s%s ",
    523			buf[ATMCI_MR / 4],
    524			buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
    525			buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "");
    526	if (host->caps.has_odd_clk_div)
    527		seq_printf(s, "{CLKDIV,CLKODD}=%u\n",
    528				((buf[ATMCI_MR / 4] & 0xff) << 1)
    529				| ((buf[ATMCI_MR / 4] >> 16) & 1));
    530	else
    531		seq_printf(s, "CLKDIV=%u\n",
    532				(buf[ATMCI_MR / 4] & 0xff));
    533	seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
    534	seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
    535	seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
    536	seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
    537			buf[ATMCI_BLKR / 4],
    538			buf[ATMCI_BLKR / 4] & 0xffff,
    539			(buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
    540	if (host->caps.has_cstor_reg)
    541		seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);
    542
    543	/* Don't read RSPR and RDR; it will consume the data there */
    544
    545	atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
    546	atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);
    547
    548	if (host->caps.has_dma_conf_reg) {
    549		u32 val;
    550
    551		val = buf[ATMCI_DMA / 4];
    552		seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
    553				val, val & 3,
    554				((val >> 4) & 3) ?
    555					1 << (((val >> 4) & 3) + 1) : 1,
    556				val & ATMCI_DMAEN ? " DMAEN" : "");
    557	}
    558	if (host->caps.has_cfg_reg) {
    559		u32 val;
    560
    561		val = buf[ATMCI_CFG / 4];
    562		seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
    563				val,
    564				val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
    565				val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
    566				val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
    567				val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
    568	}
    569
    570	kfree(buf);
    571
    572	return ret;
    573}
    574
    575DEFINE_SHOW_ATTRIBUTE(atmci_regs);
    576
    577static void atmci_init_debugfs(struct atmel_mci_slot *slot)
    578{
    579	struct mmc_host		*mmc = slot->mmc;
    580	struct atmel_mci	*host = slot->host;
    581	struct dentry		*root;
    582
    583	root = mmc->debugfs_root;
    584	if (!root)
    585		return;
    586
    587	debugfs_create_file("regs", S_IRUSR, root, host, &atmci_regs_fops);
    588	debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
    589	debugfs_create_u32("state", S_IRUSR, root, &host->state);
    590	debugfs_create_xul("pending_events", S_IRUSR, root,
    591			   &host->pending_events);
    592	debugfs_create_xul("completed_events", S_IRUSR, root,
    593			   &host->completed_events);
    594}
    595
    596#if defined(CONFIG_OF)
    597static const struct of_device_id atmci_dt_ids[] = {
    598	{ .compatible = "atmel,hsmci" },
    599	{ /* sentinel */ }
    600};
    601
    602MODULE_DEVICE_TABLE(of, atmci_dt_ids);
    603
    604static struct mci_platform_data*
    605atmci_of_init(struct platform_device *pdev)
    606{
    607	struct device_node *np = pdev->dev.of_node;
    608	struct device_node *cnp;
    609	struct mci_platform_data *pdata;
    610	u32 slot_id;
    611
    612	if (!np) {
    613		dev_err(&pdev->dev, "device node not found\n");
    614		return ERR_PTR(-EINVAL);
    615	}
    616
    617	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
    618	if (!pdata)
    619		return ERR_PTR(-ENOMEM);
    620
    621	for_each_child_of_node(np, cnp) {
    622		if (of_property_read_u32(cnp, "reg", &slot_id)) {
    623			dev_warn(&pdev->dev, "reg property is missing for %pOF\n",
    624				 cnp);
    625			continue;
    626		}
    627
    628		if (slot_id >= ATMCI_MAX_NR_SLOTS) {
    629			dev_warn(&pdev->dev, "can't have more than %d slots\n",
    630			         ATMCI_MAX_NR_SLOTS);
    631			of_node_put(cnp);
    632			break;
    633		}
    634
    635		if (of_property_read_u32(cnp, "bus-width",
    636		                         &pdata->slot[slot_id].bus_width))
    637			pdata->slot[slot_id].bus_width = 1;
    638
    639		pdata->slot[slot_id].detect_pin =
    640			of_get_named_gpio(cnp, "cd-gpios", 0);
    641
    642		pdata->slot[slot_id].detect_is_active_high =
    643			of_property_read_bool(cnp, "cd-inverted");
    644
    645		pdata->slot[slot_id].non_removable =
    646			of_property_read_bool(cnp, "non-removable");
    647
    648		pdata->slot[slot_id].wp_pin =
    649			of_get_named_gpio(cnp, "wp-gpios", 0);
    650	}
    651
    652	return pdata;
    653}
    654#else /* CONFIG_OF */
    655static inline struct mci_platform_data*
    656atmci_of_init(struct platform_device *dev)
    657{
    658	return ERR_PTR(-EINVAL);
    659}
    660#endif
    661
    662static inline unsigned int atmci_get_version(struct atmel_mci *host)
    663{
    664	return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
    665}
    666
    667/*
    668 * Fix sconfig's burst size according to atmel MCI. We need to convert them as:
    669 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
    670 * With version 0x600, we need to convert them as: 1 -> 0, 2 -> 1, 4 -> 2,
    671 * 8 -> 3, 16 -> 4.
    672 *
     673 * This can be done by finding the most significant bit set.
    674 */
    675static inline unsigned int atmci_convert_chksize(struct atmel_mci *host,
    676						 unsigned int maxburst)
    677{
    678	unsigned int version = atmci_get_version(host);
    679	unsigned int offset = 2;
    680
    681	if (version >= 0x600)
    682		offset = 1;
    683
    684	if (maxburst > 1)
    685		return fls(maxburst) - offset;
    686	else
    687		return 0;
    688}
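
/*
 * Worked example (illustrative): for maxburst = 8, fls(8) = 4, so the
 * chunk size field becomes 4 - 2 = 2 on MCI versions below 0x600
 * (mapping 1, 4, 8, 16 -> 0, 1, 2, 3) and 4 - 1 = 3 on version 0x600
 * and later (mapping 1, 2, 4, 8, 16 -> 0, 1, 2, 3, 4); maxburst = 1
 * always maps to 0.
 */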
    689
    690static void atmci_timeout_timer(struct timer_list *t)
    691{
    692	struct atmel_mci *host;
    693
    694	host = from_timer(host, t, timer);
    695
    696	dev_dbg(&host->pdev->dev, "software timeout\n");
    697
    698	if (host->mrq->cmd->data) {
    699		host->mrq->cmd->data->error = -ETIMEDOUT;
    700		host->data = NULL;
    701		/*
    702		 * With some SDIO modules, sometimes DMA transfer hangs. If
    703		 * stop_transfer() is not called then the DMA request is not
    704		 * removed, following ones are queued and never computed.
    705		 */
    706		if (host->state == STATE_DATA_XFER)
    707			host->stop_transfer(host);
    708	} else {
    709		host->mrq->cmd->error = -ETIMEDOUT;
    710		host->cmd = NULL;
    711	}
    712	host->need_reset = 1;
    713	host->state = STATE_END_REQUEST;
    714	smp_wmb();
    715	tasklet_schedule(&host->tasklet);
    716}
    717
    718static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
    719					unsigned int ns)
    720{
    721	/*
     722	 * It is easier here to use us instead of ns for the timeout;
     723	 * it prevents overflows during the calculation.
    724	 */
    725	unsigned int us = DIV_ROUND_UP(ns, 1000);
    726
    727	/* Maximum clock frequency is host->bus_hz/2 */
    728	return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
    729}
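
/*
 * Worked example (illustrative): with bus_hz = 50 MHz the card clock is at
 * most 25 MHz, so DIV_ROUND_UP(50000000, 2000000) = 25 clocks per us; a
 * 100000 ns (100 us) timeout therefore becomes 100 * 25 = 2500 clocks.
 */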
    730
    731static void atmci_set_timeout(struct atmel_mci *host,
    732		struct atmel_mci_slot *slot, struct mmc_data *data)
    733{
    734	static unsigned	dtomul_to_shift[] = {
    735		0, 4, 7, 8, 10, 12, 16, 20
    736	};
    737	unsigned	timeout;
    738	unsigned	dtocyc;
    739	unsigned	dtomul;
    740
    741	timeout = atmci_ns_to_clocks(host, data->timeout_ns)
    742		+ data->timeout_clks;
    743
    744	for (dtomul = 0; dtomul < 8; dtomul++) {
    745		unsigned shift = dtomul_to_shift[dtomul];
    746		dtocyc = (timeout + (1 << shift) - 1) >> shift;
    747		if (dtocyc < 15)
    748			break;
    749	}
    750
    751	if (dtomul >= 8) {
    752		dtomul = 7;
    753		dtocyc = 15;
    754	}
    755
    756	dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
    757			dtocyc << dtomul_to_shift[dtomul]);
    758	atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
    759}
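
/*
 * Worked example (illustrative): a 2500-clock timeout only fits DTOCYC
 * (which must stay below 15 here) once dtomul = 3 (shift 8):
 * DIV_ROUND_UP(2500, 256) = 10, so DTOR is written with
 * ATMCI_DTOMUL(3) | ATMCI_DTOCYC(10), an effective timeout of
 * 10 << 8 = 2560 clocks.
 */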
    760
    761/*
    762 * Return mask with command flags to be enabled for this command.
    763 */
    764static u32 atmci_prepare_command(struct mmc_host *mmc,
    765				 struct mmc_command *cmd)
    766{
    767	struct mmc_data	*data;
    768	u32		cmdr;
    769
    770	cmd->error = -EINPROGRESS;
    771
    772	cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);
    773
    774	if (cmd->flags & MMC_RSP_PRESENT) {
    775		if (cmd->flags & MMC_RSP_136)
    776			cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
    777		else
    778			cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
    779	}
    780
    781	/*
    782	 * This should really be MAXLAT_5 for CMD2 and ACMD41, but
    783	 * it's too difficult to determine whether this is an ACMD or
    784	 * not. Better make it 64.
    785	 */
    786	cmdr |= ATMCI_CMDR_MAXLAT_64CYC;
    787
    788	if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
    789		cmdr |= ATMCI_CMDR_OPDCMD;
    790
    791	data = cmd->data;
    792	if (data) {
    793		cmdr |= ATMCI_CMDR_START_XFER;
    794
    795		if (cmd->opcode == SD_IO_RW_EXTENDED) {
    796			cmdr |= ATMCI_CMDR_SDIO_BLOCK;
    797		} else {
    798			if (data->blocks > 1)
    799				cmdr |= ATMCI_CMDR_MULTI_BLOCK;
    800			else
    801				cmdr |= ATMCI_CMDR_BLOCK;
    802		}
    803
    804		if (data->flags & MMC_DATA_READ)
    805			cmdr |= ATMCI_CMDR_TRDIR_READ;
    806	}
    807
    808	return cmdr;
    809}
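
/*
 * Example (illustrative): for a single-block read such as CMD17 with a
 * 48-bit response, the mask built above is
 *
 *	ATMCI_CMDR_CMDNB(17) | ATMCI_CMDR_RSPTYP_48BIT |
 *	ATMCI_CMDR_MAXLAT_64CYC | ATMCI_CMDR_START_XFER |
 *	ATMCI_CMDR_BLOCK | ATMCI_CMDR_TRDIR_READ
 *
 * with ATMCI_CMDR_OPDCMD added only when the bus is in open-drain mode.
 */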
    810
    811static void atmci_send_command(struct atmel_mci *host,
    812		struct mmc_command *cmd, u32 cmd_flags)
    813{
    814	unsigned int timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
    815		ATMCI_CMD_TIMEOUT_MS;
    816
    817	WARN_ON(host->cmd);
    818	host->cmd = cmd;
    819
    820	dev_vdbg(&host->pdev->dev,
    821			"start command: ARGR=0x%08x CMDR=0x%08x\n",
    822			cmd->arg, cmd_flags);
    823
    824	atmci_writel(host, ATMCI_ARGR, cmd->arg);
    825	atmci_writel(host, ATMCI_CMDR, cmd_flags);
    826
    827	mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms));
    828}
    829
    830static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
    831{
    832	dev_dbg(&host->pdev->dev, "send stop command\n");
    833	atmci_send_command(host, data->stop, host->stop_cmdr);
    834	atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
    835}
    836
    837/*
     838 * Configure the given PDC buffer, taking care of alignment issues.
    839 * Update host->data_size and host->sg.
    840 */
    841static void atmci_pdc_set_single_buf(struct atmel_mci *host,
    842	enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
    843{
    844	u32 pointer_reg, counter_reg;
    845	unsigned int buf_size;
    846
    847	if (dir == XFER_RECEIVE) {
    848		pointer_reg = ATMEL_PDC_RPR;
    849		counter_reg = ATMEL_PDC_RCR;
    850	} else {
    851		pointer_reg = ATMEL_PDC_TPR;
    852		counter_reg = ATMEL_PDC_TCR;
    853	}
    854
    855	if (buf_nb == PDC_SECOND_BUF) {
    856		pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
    857		counter_reg += ATMEL_PDC_SCND_BUF_OFF;
    858	}
    859
    860	if (!host->caps.has_rwproof) {
    861		buf_size = host->buf_size;
    862		atmci_writel(host, pointer_reg, host->buf_phys_addr);
    863	} else {
    864		buf_size = sg_dma_len(host->sg);
    865		atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
    866	}
    867
    868	if (host->data_size <= buf_size) {
    869		if (host->data_size & 0x3) {
     870			/* If the size is not a multiple of 4, transfer bytes */
    871			atmci_writel(host, counter_reg, host->data_size);
    872			atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
    873		} else {
     874			/* Else transfer 32-bit words */
    875			atmci_writel(host, counter_reg, host->data_size / 4);
    876		}
    877		host->data_size = 0;
    878	} else {
     879		/* We assume the size of a page is 32-bit aligned */
    880		atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
    881		host->data_size -= sg_dma_len(host->sg);
    882		if (host->data_size)
    883			host->sg = sg_next(host->sg);
    884	}
    885}
    886
    887/*
     888 * Configure the PDC buffers according to the data size, i.e. configure one or
     889 * two buffers. Don't use this function if you want to configure only the second
    890 * buffer. In this case, use atmci_pdc_set_single_buf.
    891 */
    892static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
    893{
    894	atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
    895	if (host->data_size)
    896		atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
    897}
    898
    899/*
    900 * Unmap sg lists, called when transfer is finished.
    901 */
    902static void atmci_pdc_cleanup(struct atmel_mci *host)
    903{
    904	struct mmc_data         *data = host->data;
    905
    906	if (data)
    907		dma_unmap_sg(&host->pdev->dev,
    908				data->sg, data->sg_len,
    909				mmc_get_dma_dir(data));
    910}
    911
    912/*
     913 * Disable PDC transfers. Called after the ATMCI_TXBUFE or ATMCI_RXBUFF
     914 * interrupt has been received; flags EVENT_XFER_COMPLETE as pending and enables
     915 * the ATMCI_NOTBUSY interrupt, which is needed for both transfer directions.
    916 */
    917static void atmci_pdc_complete(struct atmel_mci *host)
    918{
    919	int transfer_size = host->data->blocks * host->data->blksz;
    920	int i;
    921
    922	atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
    923
    924	if ((!host->caps.has_rwproof)
    925	    && (host->data->flags & MMC_DATA_READ)) {
    926		if (host->caps.has_bad_data_ordering)
    927			for (i = 0; i < transfer_size; i++)
    928				host->buffer[i] = swab32(host->buffer[i]);
    929		sg_copy_from_buffer(host->data->sg, host->data->sg_len,
    930		                    host->buffer, transfer_size);
    931	}
    932
    933	atmci_pdc_cleanup(host);
    934
    935	dev_dbg(&host->pdev->dev, "(%s) set pending xfer complete\n", __func__);
    936	atmci_set_pending(host, EVENT_XFER_COMPLETE);
    937	tasklet_schedule(&host->tasklet);
    938}
    939
    940static void atmci_dma_cleanup(struct atmel_mci *host)
    941{
    942	struct mmc_data                 *data = host->data;
    943
    944	if (data)
    945		dma_unmap_sg(host->dma.chan->device->dev,
    946				data->sg, data->sg_len,
    947				mmc_get_dma_dir(data));
    948}
    949
    950/*
    951 * This function is called by the DMA driver from tasklet context.
    952 */
    953static void atmci_dma_complete(void *arg)
    954{
    955	struct atmel_mci	*host = arg;
    956	struct mmc_data		*data = host->data;
    957
    958	dev_vdbg(&host->pdev->dev, "DMA complete\n");
    959
    960	if (host->caps.has_dma_conf_reg)
    961		/* Disable DMA hardware handshaking on MCI */
    962		atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
    963
    964	atmci_dma_cleanup(host);
    965
    966	/*
    967	 * If the card was removed, data will be NULL. No point trying
    968	 * to send the stop command or waiting for NBUSY in this case.
    969	 */
    970	if (data) {
    971		dev_dbg(&host->pdev->dev,
    972		        "(%s) set pending xfer complete\n", __func__);
    973		atmci_set_pending(host, EVENT_XFER_COMPLETE);
    974		tasklet_schedule(&host->tasklet);
    975
    976		/*
    977		 * Regardless of what the documentation says, we have
    978		 * to wait for NOTBUSY even after block read
    979		 * operations.
    980		 *
    981		 * When the DMA transfer is complete, the controller
    982		 * may still be reading the CRC from the card, i.e.
    983		 * the data transfer is still in progress and we
    984		 * haven't seen all the potential error bits yet.
    985		 *
    986		 * The interrupt handler will schedule a different
    987		 * tasklet to finish things up when the data transfer
    988		 * is completely done.
    989		 *
    990		 * We may not complete the mmc request here anyway
    991		 * because the mmc layer may call back and cause us to
    992		 * violate the "don't submit new operations from the
    993		 * completion callback" rule of the dma engine
    994		 * framework.
    995		 */
    996		atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
    997	}
    998}
    999
   1000/*
   1001 * Returns a mask of interrupt flags to be enabled after the whole
   1002 * request has been prepared.
   1003 */
   1004static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
   1005{
   1006	u32 iflags;
   1007
   1008	data->error = -EINPROGRESS;
   1009
   1010	host->sg = data->sg;
   1011	host->sg_len = data->sg_len;
   1012	host->data = data;
   1013	host->data_chan = NULL;
   1014
   1015	iflags = ATMCI_DATA_ERROR_FLAGS;
   1016
   1017	/*
   1018	 * Errata: MMC data write operation with less than 12
   1019	 * bytes is impossible.
   1020	 *
   1021	 * Errata: MCI Transmit Data Register (TDR) FIFO
   1022	 * corruption when length is not multiple of 4.
   1023	 */
   1024	if (data->blocks * data->blksz < 12
   1025			|| (data->blocks * data->blksz) & 3)
   1026		host->need_reset = true;
   1027
   1028	host->pio_offset = 0;
   1029	if (data->flags & MMC_DATA_READ)
   1030		iflags |= ATMCI_RXRDY;
   1031	else
   1032		iflags |= ATMCI_TXRDY;
   1033
   1034	return iflags;
   1035}
   1036
   1037/*
    1038 * Set interrupt flags and set the block length in the MCI mode register even
    1039 * though this value is also accessible in the MCI block register. This seems
    1040 * to be necessary on MCI versions before the High Speed one. It also maps the
    1041 * sg list and configures the PDC registers.
   1042 */
   1043static u32
   1044atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
   1045{
   1046	u32 iflags, tmp;
   1047	int i;
   1048
   1049	data->error = -EINPROGRESS;
   1050
   1051	host->data = data;
   1052	host->sg = data->sg;
   1053	iflags = ATMCI_DATA_ERROR_FLAGS;
   1054
   1055	/* Enable pdc mode */
   1056	atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
   1057
   1058	if (data->flags & MMC_DATA_READ)
   1059		iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
   1060	else
   1061		iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
   1062
   1063	/* Set BLKLEN */
   1064	tmp = atmci_readl(host, ATMCI_MR);
   1065	tmp &= 0x0000ffff;
   1066	tmp |= ATMCI_BLKLEN(data->blksz);
   1067	atmci_writel(host, ATMCI_MR, tmp);
   1068
   1069	/* Configure PDC */
   1070	host->data_size = data->blocks * data->blksz;
   1071	dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
   1072		   mmc_get_dma_dir(data));
   1073
   1074	if ((!host->caps.has_rwproof)
   1075	    && (host->data->flags & MMC_DATA_WRITE)) {
   1076		sg_copy_to_buffer(host->data->sg, host->data->sg_len,
   1077		                  host->buffer, host->data_size);
   1078		if (host->caps.has_bad_data_ordering)
   1079			for (i = 0; i < host->data_size; i++)
   1080				host->buffer[i] = swab32(host->buffer[i]);
   1081	}
   1082
   1083	if (host->data_size)
   1084		atmci_pdc_set_both_buf(host, data->flags & MMC_DATA_READ ?
   1085				       XFER_RECEIVE : XFER_TRANSMIT);
   1086	return iflags;
   1087}
   1088
   1089static u32
   1090atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
   1091{
   1092	struct dma_chan			*chan;
   1093	struct dma_async_tx_descriptor	*desc;
   1094	struct scatterlist		*sg;
   1095	unsigned int			i;
   1096	enum dma_transfer_direction	slave_dirn;
   1097	unsigned int			sglen;
   1098	u32				maxburst;
   1099	u32 iflags;
   1100
   1101	data->error = -EINPROGRESS;
   1102
   1103	WARN_ON(host->data);
   1104	host->sg = NULL;
   1105	host->data = data;
   1106
   1107	iflags = ATMCI_DATA_ERROR_FLAGS;
   1108
   1109	/*
   1110	 * We don't do DMA on "complex" transfers, i.e. with
   1111	 * non-word-aligned buffers or lengths. Also, we don't bother
   1112	 * with all the DMA setup overhead for short transfers.
   1113	 */
   1114	if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
   1115		return atmci_prepare_data(host, data);
   1116	if (data->blksz & 3)
   1117		return atmci_prepare_data(host, data);
   1118
   1119	for_each_sg(data->sg, sg, data->sg_len, i) {
   1120		if (sg->offset & 3 || sg->length & 3)
   1121			return atmci_prepare_data(host, data);
   1122	}
   1123
   1124	/* If we don't have a channel, we can't do DMA */
   1125	if (!host->dma.chan)
   1126		return -ENODEV;
   1127
   1128	chan = host->dma.chan;
   1129	host->data_chan = chan;
   1130
   1131	if (data->flags & MMC_DATA_READ) {
   1132		host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
   1133		maxburst = atmci_convert_chksize(host,
   1134						 host->dma_conf.src_maxburst);
   1135	} else {
   1136		host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
   1137		maxburst = atmci_convert_chksize(host,
   1138						 host->dma_conf.dst_maxburst);
   1139	}
   1140
   1141	if (host->caps.has_dma_conf_reg)
   1142		atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) |
   1143			ATMCI_DMAEN);
   1144
   1145	sglen = dma_map_sg(chan->device->dev, data->sg,
   1146			data->sg_len, mmc_get_dma_dir(data));
   1147
   1148	dmaengine_slave_config(chan, &host->dma_conf);
   1149	desc = dmaengine_prep_slave_sg(chan,
   1150			data->sg, sglen, slave_dirn,
   1151			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
   1152	if (!desc)
   1153		goto unmap_exit;
   1154
   1155	host->dma.data_desc = desc;
   1156	desc->callback = atmci_dma_complete;
   1157	desc->callback_param = host;
   1158
   1159	return iflags;
   1160unmap_exit:
   1161	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
   1162		     mmc_get_dma_dir(data));
   1163	return -ENOMEM;
   1164}
   1165
   1166static void
   1167atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
   1168{
   1169	return;
   1170}
   1171
   1172/*
   1173 * Start PDC according to transfer direction.
   1174 */
   1175static void
   1176atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
   1177{
   1178	if (data->flags & MMC_DATA_READ)
   1179		atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
   1180	else
   1181		atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
   1182}
   1183
   1184static void
   1185atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
   1186{
   1187	struct dma_chan			*chan = host->data_chan;
   1188	struct dma_async_tx_descriptor	*desc = host->dma.data_desc;
   1189
   1190	if (chan) {
   1191		dmaengine_submit(desc);
   1192		dma_async_issue_pending(chan);
   1193	}
   1194}
   1195
   1196static void atmci_stop_transfer(struct atmel_mci *host)
   1197{
   1198	dev_dbg(&host->pdev->dev,
   1199	        "(%s) set pending xfer complete\n", __func__);
   1200	atmci_set_pending(host, EVENT_XFER_COMPLETE);
   1201	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
   1202}
   1203
   1204/*
   1205 * Stop data transfer because error(s) occurred.
   1206 */
   1207static void atmci_stop_transfer_pdc(struct atmel_mci *host)
   1208{
   1209	atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
   1210}
   1211
   1212static void atmci_stop_transfer_dma(struct atmel_mci *host)
   1213{
   1214	struct dma_chan *chan = host->data_chan;
   1215
   1216	if (chan) {
   1217		dmaengine_terminate_all(chan);
   1218		atmci_dma_cleanup(host);
   1219	} else {
   1220		/* Data transfer was stopped by the interrupt handler */
   1221		dev_dbg(&host->pdev->dev,
   1222		        "(%s) set pending xfer complete\n", __func__);
   1223		atmci_set_pending(host, EVENT_XFER_COMPLETE);
   1224		atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
   1225	}
   1226}
   1227
   1228/*
   1229 * Start a request: prepare data if needed, prepare the command and activate
   1230 * interrupts.
   1231 */
   1232static void atmci_start_request(struct atmel_mci *host,
   1233		struct atmel_mci_slot *slot)
   1234{
   1235	struct mmc_request	*mrq;
   1236	struct mmc_command	*cmd;
   1237	struct mmc_data		*data;
   1238	u32			iflags;
   1239	u32			cmdflags;
   1240
   1241	mrq = slot->mrq;
   1242	host->cur_slot = slot;
   1243	host->mrq = mrq;
   1244
   1245	host->pending_events = 0;
   1246	host->completed_events = 0;
   1247	host->cmd_status = 0;
   1248	host->data_status = 0;
   1249
   1250	dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
   1251
   1252	if (host->need_reset || host->caps.need_reset_after_xfer) {
   1253		iflags = atmci_readl(host, ATMCI_IMR);
   1254		iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
   1255		atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
   1256		atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
   1257		atmci_writel(host, ATMCI_MR, host->mode_reg);
   1258		if (host->caps.has_cfg_reg)
   1259			atmci_writel(host, ATMCI_CFG, host->cfg_reg);
   1260		atmci_writel(host, ATMCI_IER, iflags);
   1261		host->need_reset = false;
   1262	}
   1263	atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
   1264
   1265	iflags = atmci_readl(host, ATMCI_IMR);
   1266	if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
   1267		dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
   1268				iflags);
   1269
   1270	if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
   1271		/* Send init sequence (74 clock cycles) */
   1272		atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
   1273		while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
   1274			cpu_relax();
   1275	}
   1276	iflags = 0;
   1277	data = mrq->data;
   1278	if (data) {
   1279		atmci_set_timeout(host, slot, data);
   1280
   1281		/* Must set block count/size before sending command */
   1282		atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
   1283				| ATMCI_BLKLEN(data->blksz));
   1284		dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
   1285			ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));
   1286
   1287		iflags |= host->prepare_data(host, data);
   1288	}
   1289
   1290	iflags |= ATMCI_CMDRDY;
   1291	cmd = mrq->cmd;
   1292	cmdflags = atmci_prepare_command(slot->mmc, cmd);
   1293
   1294	/*
   1295	 * DMA transfer should be started before sending the command to avoid
   1296	 * unexpected errors especially for read operations in SDIO mode.
   1297	 * Unfortunately, in PDC mode, command has to be sent before starting
   1298	 * the transfer.
   1299	 */
   1300	if (host->submit_data != &atmci_submit_data_dma)
   1301		atmci_send_command(host, cmd, cmdflags);
   1302
   1303	if (data)
   1304		host->submit_data(host, data);
   1305
   1306	if (host->submit_data == &atmci_submit_data_dma)
   1307		atmci_send_command(host, cmd, cmdflags);
   1308
   1309	if (mrq->stop) {
   1310		host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
   1311		host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
   1312		if (!(data->flags & MMC_DATA_WRITE))
   1313			host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
   1314		host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
   1315	}
   1316
   1317	/*
   1318	 * We could have enabled interrupts earlier, but I suspect
   1319	 * that would open up a nice can of interesting race
   1320	 * conditions (e.g. command and data complete, but stop not
   1321	 * prepared yet.)
   1322	 */
   1323	atmci_writel(host, ATMCI_IER, iflags);
   1324}
   1325
   1326static void atmci_queue_request(struct atmel_mci *host,
   1327		struct atmel_mci_slot *slot, struct mmc_request *mrq)
   1328{
   1329	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
   1330			host->state);
   1331
   1332	spin_lock_bh(&host->lock);
   1333	slot->mrq = mrq;
   1334	if (host->state == STATE_IDLE) {
   1335		host->state = STATE_SENDING_CMD;
   1336		atmci_start_request(host, slot);
   1337	} else {
   1338		dev_dbg(&host->pdev->dev, "queue request\n");
   1339		list_add_tail(&slot->queue_node, &host->queue);
   1340	}
   1341	spin_unlock_bh(&host->lock);
   1342}
   1343
   1344static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
   1345{
   1346	struct atmel_mci_slot	*slot = mmc_priv(mmc);
   1347	struct atmel_mci	*host = slot->host;
   1348	struct mmc_data		*data;
   1349
   1350	WARN_ON(slot->mrq);
   1351	dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
   1352
   1353	/*
   1354	 * We may "know" the card is gone even though there's still an
   1355	 * electrical connection. If so, we really need to communicate
   1356	 * this to the MMC core since there won't be any more
   1357	 * interrupts as the card is completely removed. Otherwise,
   1358	 * the MMC core might believe the card is still there even
   1359	 * though the card was just removed very slowly.
   1360	 */
   1361	if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
   1362		mrq->cmd->error = -ENOMEDIUM;
   1363		mmc_request_done(mmc, mrq);
   1364		return;
   1365	}
   1366
   1367	/* We don't support multiple blocks of weird lengths. */
   1368	data = mrq->data;
    1369	if (data && data->blocks > 1 && data->blksz & 3) {
    1370		mrq->cmd->error = -EINVAL;
    1371		mmc_request_done(mmc, mrq);
		return;
    1372	}
   1373
   1374	atmci_queue_request(host, slot, mrq);
   1375}
   1376
   1377static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
   1378{
   1379	struct atmel_mci_slot	*slot = mmc_priv(mmc);
   1380	struct atmel_mci	*host = slot->host;
   1381	unsigned int		i;
   1382
   1383	slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
   1384	switch (ios->bus_width) {
   1385	case MMC_BUS_WIDTH_1:
   1386		slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
   1387		break;
   1388	case MMC_BUS_WIDTH_4:
   1389		slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
   1390		break;
   1391	case MMC_BUS_WIDTH_8:
   1392		slot->sdc_reg |= ATMCI_SDCBUS_8BIT;
   1393		break;
   1394	}
   1395
   1396	if (ios->clock) {
   1397		unsigned int clock_min = ~0U;
   1398		int clkdiv;
   1399
   1400		spin_lock_bh(&host->lock);
   1401		if (!host->mode_reg) {
   1402			atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
   1403			atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
   1404			if (host->caps.has_cfg_reg)
   1405				atmci_writel(host, ATMCI_CFG, host->cfg_reg);
   1406		}
   1407
   1408		/*
   1409		 * Use mirror of ios->clock to prevent race with mmc
   1410		 * core ios update when finding the minimum.
   1411		 */
   1412		slot->clock = ios->clock;
   1413		for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
   1414			if (host->slot[i] && host->slot[i]->clock
   1415					&& host->slot[i]->clock < clock_min)
   1416				clock_min = host->slot[i]->clock;
   1417		}
   1418
   1419		/* Calculate clock divider */
   1420		if (host->caps.has_odd_clk_div) {
   1421			clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
   1422			if (clkdiv < 0) {
   1423				dev_warn(&mmc->class_dev,
   1424					 "clock %u too fast; using %lu\n",
   1425					 clock_min, host->bus_hz / 2);
   1426				clkdiv = 0;
   1427			} else if (clkdiv > 511) {
   1428				dev_warn(&mmc->class_dev,
   1429				         "clock %u too slow; using %lu\n",
   1430				         clock_min, host->bus_hz / (511 + 2));
   1431				clkdiv = 511;
   1432			}
   1433			host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
   1434			                 | ATMCI_MR_CLKODD(clkdiv & 1);
   1435		} else {
   1436			clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
   1437			if (clkdiv > 255) {
   1438				dev_warn(&mmc->class_dev,
   1439				         "clock %u too slow; using %lu\n",
   1440				         clock_min, host->bus_hz / (2 * 256));
   1441				clkdiv = 255;
   1442			}
   1443			host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
   1444		}
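
		/*
		 * Worked example (illustrative): with bus_hz = 100 MHz and
		 * clock_min = 25 MHz, the odd-divider path above yields
		 * clkdiv = DIV_ROUND_UP(100, 25) - 2 = 2, i.e.
		 * f = bus_hz / (clkdiv + 2) = 25 MHz, while the legacy path
		 * yields clkdiv = DIV_ROUND_UP(100, 2 * 25) - 1 = 1, i.e.
		 * f = bus_hz / (2 * (clkdiv + 1)) = 25 MHz.
		 */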
   1445
   1446		/*
   1447		 * WRPROOF and RDPROOF prevent overruns/underruns by
   1448		 * stopping the clock when the FIFO is full/empty.
   1449		 * This state is not expected to last for long.
   1450		 */
   1451		if (host->caps.has_rwproof)
   1452			host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);
   1453
   1454		if (host->caps.has_cfg_reg) {
    1455			/* set up High Speed mode according to the card capabilities */
   1456			if (ios->timing == MMC_TIMING_SD_HS)
   1457				host->cfg_reg |= ATMCI_CFG_HSMODE;
   1458			else
   1459				host->cfg_reg &= ~ATMCI_CFG_HSMODE;
   1460		}
   1461
   1462		if (list_empty(&host->queue)) {
   1463			atmci_writel(host, ATMCI_MR, host->mode_reg);
   1464			if (host->caps.has_cfg_reg)
   1465				atmci_writel(host, ATMCI_CFG, host->cfg_reg);
   1466		} else {
   1467			host->need_clock_update = true;
   1468		}
   1469
   1470		spin_unlock_bh(&host->lock);
   1471	} else {
   1472		bool any_slot_active = false;
   1473
   1474		spin_lock_bh(&host->lock);
   1475		slot->clock = 0;
   1476		for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
   1477			if (host->slot[i] && host->slot[i]->clock) {
   1478				any_slot_active = true;
   1479				break;
   1480			}
   1481		}
   1482		if (!any_slot_active) {
   1483			atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
   1484			if (host->mode_reg) {
   1485				atmci_readl(host, ATMCI_MR);
   1486			}
   1487			host->mode_reg = 0;
   1488		}
   1489		spin_unlock_bh(&host->lock);
   1490	}
   1491
   1492	switch (ios->power_mode) {
   1493	case MMC_POWER_OFF:
   1494		if (!IS_ERR(mmc->supply.vmmc))
   1495			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
   1496		break;
   1497	case MMC_POWER_UP:
   1498		set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
   1499		if (!IS_ERR(mmc->supply.vmmc))
   1500			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
   1501		break;
   1502	default:
   1503		break;
   1504	}
   1505}
   1506
   1507static int atmci_get_ro(struct mmc_host *mmc)
   1508{
   1509	int			read_only = -ENOSYS;
   1510	struct atmel_mci_slot	*slot = mmc_priv(mmc);
   1511
   1512	if (gpio_is_valid(slot->wp_pin)) {
   1513		read_only = gpio_get_value(slot->wp_pin);
   1514		dev_dbg(&mmc->class_dev, "card is %s\n",
   1515				read_only ? "read-only" : "read-write");
   1516	}
   1517
   1518	return read_only;
   1519}
   1520
   1521static int atmci_get_cd(struct mmc_host *mmc)
   1522{
   1523	int			present = -ENOSYS;
   1524	struct atmel_mci_slot	*slot = mmc_priv(mmc);
   1525
   1526	if (gpio_is_valid(slot->detect_pin)) {
   1527		present = !(gpio_get_value(slot->detect_pin) ^
   1528			    slot->detect_is_active_high);
   1529		dev_dbg(&mmc->class_dev, "card is %spresent\n",
   1530				present ? "" : "not ");
   1531	}
   1532
   1533	return present;
   1534}
   1535
   1536static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
   1537{
   1538	struct atmel_mci_slot	*slot = mmc_priv(mmc);
   1539	struct atmel_mci	*host = slot->host;
   1540
   1541	if (enable)
   1542		atmci_writel(host, ATMCI_IER, slot->sdio_irq);
   1543	else
   1544		atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
   1545}
   1546
   1547static const struct mmc_host_ops atmci_ops = {
   1548	.request	= atmci_request,
   1549	.set_ios	= atmci_set_ios,
   1550	.get_ro		= atmci_get_ro,
   1551	.get_cd		= atmci_get_cd,
   1552	.enable_sdio_irq = atmci_enable_sdio_irq,
   1553};
   1554
   1555/* Called with host->lock held */
   1556static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
   1557	__releases(&host->lock)
   1558	__acquires(&host->lock)
   1559{
   1560	struct atmel_mci_slot	*slot = NULL;
   1561	struct mmc_host		*prev_mmc = host->cur_slot->mmc;
   1562
   1563	WARN_ON(host->cmd || host->data);
   1564
   1565	del_timer(&host->timer);
   1566
    1567	/*
    1568	 * Update the MMC clock rate if necessary. This may be
    1569	 * needed if set_ios() was called while a different slot was
    1570	 * busy transferring data.
    1571	 */
   1572	if (host->need_clock_update) {
   1573		atmci_writel(host, ATMCI_MR, host->mode_reg);
   1574		if (host->caps.has_cfg_reg)
   1575			atmci_writel(host, ATMCI_CFG, host->cfg_reg);
   1576	}
   1577
   1578	host->cur_slot->mrq = NULL;
   1579	host->mrq = NULL;
   1580	if (!list_empty(&host->queue)) {
   1581		slot = list_entry(host->queue.next,
   1582				struct atmel_mci_slot, queue_node);
   1583		list_del(&slot->queue_node);
   1584		dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
   1585				mmc_hostname(slot->mmc));
   1586		host->state = STATE_SENDING_CMD;
   1587		atmci_start_request(host, slot);
   1588	} else {
   1589		dev_vdbg(&host->pdev->dev, "list empty\n");
   1590		host->state = STATE_IDLE;
   1591	}
   1592
   1593	spin_unlock(&host->lock);
   1594	mmc_request_done(prev_mmc, mrq);
   1595	spin_lock(&host->lock);
   1596}
   1597
   1598static void atmci_command_complete(struct atmel_mci *host,
   1599			struct mmc_command *cmd)
   1600{
   1601	u32		status = host->cmd_status;
   1602
   1603	/* Read the response from the card (up to 16 bytes) */
   1604	cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
   1605	cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
   1606	cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
   1607	cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);
   1608
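	/* Translate the status latched in the IRQ handler into an errno */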
   1609	if (status & ATMCI_RTOE)
   1610		cmd->error = -ETIMEDOUT;
   1611	else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
   1612		cmd->error = -EILSEQ;
   1613	else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
   1614		cmd->error = -EIO;
   1615	else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
   1616		if (host->caps.need_blksz_mul_4) {
   1617			cmd->error = -EINVAL;
   1618			host->need_reset = 1;
   1619		}
   1620	} else
   1621		cmd->error = 0;
   1622}
   1623
   1624static void atmci_detect_change(struct timer_list *t)
   1625{
   1626	struct atmel_mci_slot	*slot = from_timer(slot, t, detect_timer);
   1627	bool			present;
   1628	bool			present_old;
   1629
   1630	/*
   1631	 * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
   1632	 * freeing the interrupt. We must not re-enable the interrupt
   1633	 * if it has been freed, and if we're shutting down, it
   1634	 * doesn't really matter whether the card is present or not.
   1635	 */
   1636	smp_rmb();
   1637	if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
   1638		return;
   1639
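	/*
	 * The detect IRQ was disabled in atmci_detect_interrupt(); the
	 * 20 ms timer delay lets the pin settle before the IRQ is
	 * re-enabled and the card state is sampled.
	 */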
   1640	enable_irq(gpio_to_irq(slot->detect_pin));
   1641	present = !(gpio_get_value(slot->detect_pin) ^
   1642		    slot->detect_is_active_high);
   1643	present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
   1644
   1645	dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
   1646			present, present_old);
   1647
   1648	if (present != present_old) {
   1649		struct atmel_mci	*host = slot->host;
   1650		struct mmc_request	*mrq;
   1651
   1652		dev_dbg(&slot->mmc->class_dev, "card %s\n",
   1653			present ? "inserted" : "removed");
   1654
   1655		spin_lock(&host->lock);
   1656
   1657		if (!present)
   1658			clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
   1659		else
   1660			set_bit(ATMCI_CARD_PRESENT, &slot->flags);
   1661
   1662		/* Clean up queue if present */
   1663		mrq = slot->mrq;
   1664		if (mrq) {
   1665			if (mrq == host->mrq) {
   1666				/*
   1667				 * Reset controller to terminate any ongoing
   1668				 * commands or data transfers.
   1669				 */
   1670				atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
   1671				atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
   1672				atmci_writel(host, ATMCI_MR, host->mode_reg);
   1673				if (host->caps.has_cfg_reg)
   1674					atmci_writel(host, ATMCI_CFG, host->cfg_reg);
   1675
   1676				host->data = NULL;
   1677				host->cmd = NULL;
   1678
   1679				switch (host->state) {
   1680				case STATE_IDLE:
   1681					break;
   1682				case STATE_SENDING_CMD:
   1683					mrq->cmd->error = -ENOMEDIUM;
   1684					if (mrq->data)
   1685						host->stop_transfer(host);
   1686					break;
   1687				case STATE_DATA_XFER:
   1688					mrq->data->error = -ENOMEDIUM;
   1689					host->stop_transfer(host);
   1690					break;
   1691				case STATE_WAITING_NOTBUSY:
   1692					mrq->data->error = -ENOMEDIUM;
   1693					break;
   1694				case STATE_SENDING_STOP:
   1695					mrq->stop->error = -ENOMEDIUM;
   1696					break;
   1697				case STATE_END_REQUEST:
   1698					break;
   1699				}
   1700
   1701				atmci_request_end(host, mrq);
   1702			} else {
   1703				list_del(&slot->queue_node);
   1704				mrq->cmd->error = -ENOMEDIUM;
   1705				if (mrq->data)
   1706					mrq->data->error = -ENOMEDIUM;
   1707				if (mrq->stop)
   1708					mrq->stop->error = -ENOMEDIUM;
   1709
   1710				spin_unlock(&host->lock);
   1711				mmc_request_done(slot->mmc, mrq);
   1712				spin_lock(&host->lock);
   1713			}
   1714		}
   1715		spin_unlock(&host->lock);
   1716
   1717		mmc_detect_change(slot->mmc, 0);
   1718	}
   1719}
   1720
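/*
 * Bottom half of the request handling: the interrupt handler latches
 * completion events in host->pending_events and schedules this tasklet,
 * which steps the request state machine (SENDING_CMD, DATA_XFER,
 * WAITING_NOTBUSY, SENDING_STOP, END_REQUEST) until no more progress
 * can be made.
 */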
   1721static void atmci_tasklet_func(struct tasklet_struct *t)
   1722{
   1723	struct atmel_mci        *host = from_tasklet(host, t, tasklet);
   1724	struct mmc_request	*mrq = host->mrq;
   1725	struct mmc_data		*data = host->data;
   1726	enum atmel_mci_state	state = host->state;
   1727	enum atmel_mci_state	prev_state;
   1728	u32			status;
   1729
   1730	spin_lock(&host->lock);
   1731
   1732	state = host->state;
   1733
   1734	dev_vdbg(&host->pdev->dev,
   1735		"tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
   1736		state, host->pending_events, host->completed_events,
   1737		atmci_readl(host, ATMCI_IMR));
   1738
   1739	do {
   1740		prev_state = state;
   1741		dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
   1742
   1743		switch (state) {
   1744		case STATE_IDLE:
   1745			break;
   1746
   1747		case STATE_SENDING_CMD:
    1748			/*
    1749			 * The command has been sent; we are waiting for it to
    1750			 * become ready. Three next states are possible:
    1751			 * END_REQUEST by default, WAITING_NOTBUSY if the
    1752			 * command requires it, or DATA_XFER if there is data.
    1753			 */
   1754			dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
   1755			if (!atmci_test_and_clear_pending(host,
   1756						EVENT_CMD_RDY))
   1757				break;
   1758
   1759			dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
   1760			host->cmd = NULL;
   1761			atmci_set_completed(host, EVENT_CMD_RDY);
   1762			atmci_command_complete(host, mrq->cmd);
   1763			if (mrq->data) {
   1764				dev_dbg(&host->pdev->dev,
   1765				        "command with data transfer");
   1766				/*
   1767				 * If there is a command error don't start
   1768				 * data transfer.
   1769				 */
   1770				if (mrq->cmd->error) {
   1771					host->stop_transfer(host);
   1772					host->data = NULL;
   1773					atmci_writel(host, ATMCI_IDR,
   1774					             ATMCI_TXRDY | ATMCI_RXRDY
   1775					             | ATMCI_DATA_ERROR_FLAGS);
   1776					state = STATE_END_REQUEST;
   1777				} else
   1778					state = STATE_DATA_XFER;
   1779			} else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
   1780				dev_dbg(&host->pdev->dev,
    1781				        "command response needs to wait for notbusy");
   1782				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
   1783				state = STATE_WAITING_NOTBUSY;
   1784			} else
   1785				state = STATE_END_REQUEST;
   1786
   1787			break;
   1788
   1789		case STATE_DATA_XFER:
   1790			if (atmci_test_and_clear_pending(host,
   1791						EVENT_DATA_ERROR)) {
   1792				dev_dbg(&host->pdev->dev, "set completed data error\n");
   1793				atmci_set_completed(host, EVENT_DATA_ERROR);
   1794				state = STATE_END_REQUEST;
   1795				break;
   1796			}
   1797
    1798			/*
    1799			 * A data transfer is in progress. The event expected
    1800			 * to move to the next state depends on the transfer
    1801			 * type (PDC or DMA). Once the transfer is done we move
    1802			 * to the next step, which is WAITING_NOTBUSY in the
    1803			 * write case and directly SENDING_STOP in the read case.
    1804			 */
   1805			dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
   1806			if (!atmci_test_and_clear_pending(host,
   1807						EVENT_XFER_COMPLETE))
   1808				break;
   1809
   1810			dev_dbg(&host->pdev->dev,
   1811			        "(%s) set completed xfer complete\n",
   1812				__func__);
   1813			atmci_set_completed(host, EVENT_XFER_COMPLETE);
   1814
   1815			if (host->caps.need_notbusy_for_read_ops ||
   1816			   (host->data->flags & MMC_DATA_WRITE)) {
   1817				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
   1818				state = STATE_WAITING_NOTBUSY;
   1819			} else if (host->mrq->stop) {
   1820				atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
   1821				atmci_send_stop_cmd(host, data);
   1822				state = STATE_SENDING_STOP;
   1823			} else {
   1824				host->data = NULL;
   1825				data->bytes_xfered = data->blocks * data->blksz;
   1826				data->error = 0;
   1827				state = STATE_END_REQUEST;
   1828			}
   1829			break;
   1830
   1831		case STATE_WAITING_NOTBUSY:
    1832			/*
    1833			 * We can be in this state for two reasons: a command
    1834			 * that requires waiting for the not-busy signal (stop
    1835			 * commands included) or a write operation. In the
    1836			 * latter case, we need to send a stop command.
    1837			 */
   1838			dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
   1839			if (!atmci_test_and_clear_pending(host,
   1840						EVENT_NOTBUSY))
   1841				break;
   1842
   1843			dev_dbg(&host->pdev->dev, "set completed not busy\n");
   1844			atmci_set_completed(host, EVENT_NOTBUSY);
   1845
   1846			if (host->data) {
    1847				/*
    1848				 * For some commands such as CMD53, even if
    1849				 * there is a data transfer, there is no stop
    1850				 * command to send.
    1851				 */
   1852				if (host->mrq->stop) {
   1853					atmci_writel(host, ATMCI_IER,
   1854					             ATMCI_CMDRDY);
   1855					atmci_send_stop_cmd(host, data);
   1856					state = STATE_SENDING_STOP;
   1857				} else {
   1858					host->data = NULL;
   1859					data->bytes_xfered = data->blocks
   1860					                     * data->blksz;
   1861					data->error = 0;
   1862					state = STATE_END_REQUEST;
   1863				}
   1864			} else
   1865				state = STATE_END_REQUEST;
   1866			break;
   1867
   1868		case STATE_SENDING_STOP:
   1869			/*
   1870			 * In this state, it is important to set host->data to
   1871			 * NULL (which is tested in the waiting notbusy state)
   1872			 * in order to go to the end request state instead of
   1873			 * sending stop again.
   1874			 */
   1875			dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
   1876			if (!atmci_test_and_clear_pending(host,
   1877						EVENT_CMD_RDY))
   1878				break;
   1879
   1880			dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
   1881			host->cmd = NULL;
   1882			data->bytes_xfered = data->blocks * data->blksz;
   1883			data->error = 0;
   1884			atmci_command_complete(host, mrq->stop);
   1885			if (mrq->stop->error) {
   1886				host->stop_transfer(host);
   1887				atmci_writel(host, ATMCI_IDR,
   1888				             ATMCI_TXRDY | ATMCI_RXRDY
   1889				             | ATMCI_DATA_ERROR_FLAGS);
   1890				state = STATE_END_REQUEST;
   1891			} else {
   1892				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
   1893				state = STATE_WAITING_NOTBUSY;
   1894			}
   1895			host->data = NULL;
   1896			break;
   1897
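		/*
		 * Terminal state: mask the data interrupts, turn any latched
		 * data error status into an errno, then complete the request.
		 */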
   1898		case STATE_END_REQUEST:
   1899			atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
   1900			                   | ATMCI_DATA_ERROR_FLAGS);
   1901			status = host->data_status;
   1902			if (unlikely(status)) {
   1903				host->stop_transfer(host);
   1904				host->data = NULL;
   1905				if (data) {
   1906					if (status & ATMCI_DTOE) {
   1907						data->error = -ETIMEDOUT;
   1908					} else if (status & ATMCI_DCRCE) {
   1909						data->error = -EILSEQ;
   1910					} else {
   1911						data->error = -EIO;
   1912					}
   1913				}
   1914			}
   1915
   1916			atmci_request_end(host, host->mrq);
   1917			goto unlock; /* atmci_request_end() sets host->state */
   1918			break;
   1919		}
   1920	} while (state != prev_state);
   1921
   1922	host->state = state;
   1923
   1924unlock:
   1925	spin_unlock(&host->lock);
   1926}
   1927
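/*
 * PIO read path: drain ATMCI_RDR into the scatterlist one 32-bit word at a
 * time, handling words that straddle a scatterlist entry, until RXRDY
 * deasserts, an error is flagged or the list is exhausted.
 */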
   1928static void atmci_read_data_pio(struct atmel_mci *host)
   1929{
   1930	struct scatterlist	*sg = host->sg;
   1931	unsigned int		offset = host->pio_offset;
   1932	struct mmc_data		*data = host->data;
   1933	u32			value;
   1934	u32			status;
   1935	unsigned int		nbytes = 0;
   1936
   1937	do {
   1938		value = atmci_readl(host, ATMCI_RDR);
   1939		if (likely(offset + 4 <= sg->length)) {
   1940			sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset);
   1941
   1942			offset += 4;
   1943			nbytes += 4;
   1944
   1945			if (offset == sg->length) {
   1946				flush_dcache_page(sg_page(sg));
   1947				host->sg = sg = sg_next(sg);
   1948				host->sg_len--;
   1949				if (!sg || !host->sg_len)
   1950					goto done;
   1951
   1952				offset = 0;
   1953			}
   1954		} else {
   1955			unsigned int remaining = sg->length - offset;
   1956
   1957			sg_pcopy_from_buffer(sg, 1, &value, remaining, offset);
   1958			nbytes += remaining;
   1959
   1960			flush_dcache_page(sg_page(sg));
   1961			host->sg = sg = sg_next(sg);
   1962			host->sg_len--;
   1963			if (!sg || !host->sg_len)
   1964				goto done;
   1965
   1966			offset = 4 - remaining;
   1967			sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining,
   1968					offset, 0);
   1969			nbytes += offset;
   1970		}
   1971
   1972		status = atmci_readl(host, ATMCI_SR);
   1973		if (status & ATMCI_DATA_ERROR_FLAGS) {
   1974			atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
   1975						| ATMCI_DATA_ERROR_FLAGS));
   1976			host->data_status = status;
   1977			data->bytes_xfered += nbytes;
   1978			return;
   1979		}
   1980	} while (status & ATMCI_RXRDY);
   1981
   1982	host->pio_offset = offset;
   1983	data->bytes_xfered += nbytes;
   1984
   1985	return;
   1986
   1987done:
   1988	atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
   1989	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
   1990	data->bytes_xfered += nbytes;
   1991	smp_wmb();
   1992	atmci_set_pending(host, EVENT_XFER_COMPLETE);
   1993}
   1994
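/*
 * PIO write path: mirror image of atmci_read_data_pio(), feeding ATMCI_TDR
 * from the scatterlist for as long as TXRDY stays asserted.
 */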
   1995static void atmci_write_data_pio(struct atmel_mci *host)
   1996{
   1997	struct scatterlist	*sg = host->sg;
   1998	unsigned int		offset = host->pio_offset;
   1999	struct mmc_data		*data = host->data;
   2000	u32			value;
   2001	u32			status;
   2002	unsigned int		nbytes = 0;
   2003
   2004	do {
   2005		if (likely(offset + 4 <= sg->length)) {
   2006			sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset);
   2007			atmci_writel(host, ATMCI_TDR, value);
   2008
   2009			offset += 4;
   2010			nbytes += 4;
   2011			if (offset == sg->length) {
   2012				host->sg = sg = sg_next(sg);
   2013				host->sg_len--;
   2014				if (!sg || !host->sg_len)
   2015					goto done;
   2016
   2017				offset = 0;
   2018			}
   2019		} else {
   2020			unsigned int remaining = sg->length - offset;
   2021
   2022			value = 0;
   2023			sg_pcopy_to_buffer(sg, 1, &value, remaining, offset);
   2024			nbytes += remaining;
   2025
   2026			host->sg = sg = sg_next(sg);
   2027			host->sg_len--;
   2028			if (!sg || !host->sg_len) {
   2029				atmci_writel(host, ATMCI_TDR, value);
   2030				goto done;
   2031			}
   2032
   2033			offset = 4 - remaining;
   2034			sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
   2035					offset, 0);
   2036			atmci_writel(host, ATMCI_TDR, value);
   2037			nbytes += offset;
   2038		}
   2039
   2040		status = atmci_readl(host, ATMCI_SR);
   2041		if (status & ATMCI_DATA_ERROR_FLAGS) {
   2042			atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
   2043						| ATMCI_DATA_ERROR_FLAGS));
   2044			host->data_status = status;
   2045			data->bytes_xfered += nbytes;
   2046			return;
   2047		}
   2048	} while (status & ATMCI_TXRDY);
   2049
   2050	host->pio_offset = offset;
   2051	data->bytes_xfered += nbytes;
   2052
   2053	return;
   2054
   2055done:
   2056	atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
   2057	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
   2058	data->bytes_xfered += nbytes;
   2059	smp_wmb();
   2060	atmci_set_pending(host, EVENT_XFER_COMPLETE);
   2061}
   2062
   2063static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
   2064{
   2065	int	i;
   2066
   2067	for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
   2068		struct atmel_mci_slot *slot = host->slot[i];
   2069		if (slot && (status & slot->sdio_irq)) {
   2070			mmc_signal_sdio_irq(slot->mmc);
   2071		}
   2072	}
   2073}
   2074
   2075
   2076static irqreturn_t atmci_interrupt(int irq, void *dev_id)
   2077{
   2078	struct atmel_mci	*host = dev_id;
   2079	u32			status, mask, pending;
   2080	unsigned int		pass_count = 0;
   2081
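	/*
	 * Keep servicing events as long as new ones appear, but bound the
	 * number of passes. pass_count stays zero only when no enabled
	 * interrupt was pending at all, in which case IRQ_NONE is returned.
	 */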
   2082	do {
   2083		status = atmci_readl(host, ATMCI_SR);
   2084		mask = atmci_readl(host, ATMCI_IMR);
   2085		pending = status & mask;
   2086		if (!pending)
   2087			break;
   2088
   2089		if (pending & ATMCI_DATA_ERROR_FLAGS) {
   2090			dev_dbg(&host->pdev->dev, "IRQ: data error\n");
   2091			atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
   2092					| ATMCI_RXRDY | ATMCI_TXRDY
   2093					| ATMCI_ENDRX | ATMCI_ENDTX
   2094					| ATMCI_RXBUFF | ATMCI_TXBUFE);
   2095
   2096			host->data_status = status;
   2097			dev_dbg(&host->pdev->dev, "set pending data error\n");
   2098			smp_wmb();
   2099			atmci_set_pending(host, EVENT_DATA_ERROR);
   2100			tasklet_schedule(&host->tasklet);
   2101		}
   2102
   2103		if (pending & ATMCI_TXBUFE) {
   2104			dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
   2105			atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
   2106			atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
    2107			/*
    2108			 * We can receive this interrupt before the second PDC
    2109			 * buffer has been configured, so we need to reconfigure
    2110			 * both the first and second buffers again.
    2111			 */
   2112			if (host->data_size) {
   2113				atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
   2114				atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
   2115				atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
   2116			} else {
   2117				atmci_pdc_complete(host);
   2118			}
   2119		} else if (pending & ATMCI_ENDTX) {
   2120			dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
   2121			atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
   2122
   2123			if (host->data_size) {
   2124				atmci_pdc_set_single_buf(host,
   2125						XFER_TRANSMIT, PDC_SECOND_BUF);
   2126				atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
   2127			}
   2128		}
   2129
   2130		if (pending & ATMCI_RXBUFF) {
   2131			dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
   2132			atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
   2133			atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
   2134			/*
   2135			 * We can receive this interruption before having configured
   2136			 * the second pdc buffer, so we need to reconfigure first and
   2137			 * second buffers again
   2138			 */
   2139			if (host->data_size) {
   2140				atmci_pdc_set_both_buf(host, XFER_RECEIVE);
   2141				atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
   2142				atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
   2143			} else {
   2144				atmci_pdc_complete(host);
   2145			}
   2146		} else if (pending & ATMCI_ENDRX) {
   2147			dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
   2148			atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
   2149
   2150			if (host->data_size) {
   2151				atmci_pdc_set_single_buf(host,
   2152						XFER_RECEIVE, PDC_SECOND_BUF);
   2153				atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
   2154			}
   2155		}
   2156
    2157		/*
    2158		 * Early MCI IPs, mainly the ones with a PDC, have issues
    2159		 * with the notbusy signal: it is not raised after a data
    2160		 * transmission unless a stop command has been sent.
    2161		 * The appropriate workaround is to use the BLKE signal.
    2162		 */
   2163		if (pending & ATMCI_BLKE) {
   2164			dev_dbg(&host->pdev->dev, "IRQ: blke\n");
   2165			atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
   2166			smp_wmb();
   2167			dev_dbg(&host->pdev->dev, "set pending notbusy\n");
   2168			atmci_set_pending(host, EVENT_NOTBUSY);
   2169			tasklet_schedule(&host->tasklet);
   2170		}
   2171
   2172		if (pending & ATMCI_NOTBUSY) {
   2173			dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
   2174			atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
   2175			smp_wmb();
   2176			dev_dbg(&host->pdev->dev, "set pending notbusy\n");
   2177			atmci_set_pending(host, EVENT_NOTBUSY);
   2178			tasklet_schedule(&host->tasklet);
   2179		}
   2180
   2181		if (pending & ATMCI_RXRDY)
   2182			atmci_read_data_pio(host);
   2183		if (pending & ATMCI_TXRDY)
   2184			atmci_write_data_pio(host);
   2185
   2186		if (pending & ATMCI_CMDRDY) {
   2187			dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
   2188			atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
   2189			host->cmd_status = status;
   2190			smp_wmb();
   2191			dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
   2192			atmci_set_pending(host, EVENT_CMD_RDY);
   2193			tasklet_schedule(&host->tasklet);
   2194		}
   2195
   2196		if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
   2197			atmci_sdio_interrupt(host, status);
   2198
   2199	} while (pass_count++ < 5);
   2200
   2201	return pass_count ? IRQ_HANDLED : IRQ_NONE;
   2202}
   2203
   2204static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
   2205{
   2206	struct atmel_mci_slot	*slot = dev_id;
   2207
   2208	/*
   2209	 * Disable interrupts until the pin has stabilized and check
   2210	 * the state then. Use mod_timer() since we may be in the
   2211	 * middle of the timer routine when this interrupt triggers.
   2212	 */
   2213	disable_irq_nosync(irq);
   2214	mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
   2215
   2216	return IRQ_HANDLED;
   2217}
   2218
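/*
 * Set up one MMC slot: allocate the mmc_host, apply bus-width and transfer
 * limits, claim the optional card-detect and write-protect GPIOs, and
 * register the slot with the MMC core.
 */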
   2219static int atmci_init_slot(struct atmel_mci *host,
   2220		struct mci_slot_pdata *slot_data, unsigned int id,
   2221		u32 sdc_reg, u32 sdio_irq)
   2222{
   2223	struct mmc_host			*mmc;
   2224	struct atmel_mci_slot		*slot;
   2225
   2226	mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
   2227	if (!mmc)
   2228		return -ENOMEM;
   2229
   2230	slot = mmc_priv(mmc);
   2231	slot->mmc = mmc;
   2232	slot->host = host;
   2233	slot->detect_pin = slot_data->detect_pin;
   2234	slot->wp_pin = slot_data->wp_pin;
   2235	slot->detect_is_active_high = slot_data->detect_is_active_high;
   2236	slot->sdc_reg = sdc_reg;
   2237	slot->sdio_irq = sdio_irq;
   2238
   2239	dev_dbg(&mmc->class_dev,
   2240	        "slot[%u]: bus_width=%u, detect_pin=%d, "
   2241		"detect_is_active_high=%s, wp_pin=%d\n",
   2242		id, slot_data->bus_width, slot_data->detect_pin,
   2243		slot_data->detect_is_active_high ? "true" : "false",
   2244		slot_data->wp_pin);
   2245
   2246	mmc->ops = &atmci_ops;
   2247	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
   2248	mmc->f_max = host->bus_hz / 2;
   2249	mmc->ocr_avail	= MMC_VDD_32_33 | MMC_VDD_33_34;
   2250	if (sdio_irq)
   2251		mmc->caps |= MMC_CAP_SDIO_IRQ;
   2252	if (host->caps.has_highspeed)
   2253		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
    2254	/*
    2255	 * Without the read/write proof capability, it is strongly suggested
    2256	 * to use only a 1-bit data bus to prevent FIFO underruns and overruns,
    2257	 * which would corrupt data.
    2258	 */
   2259	if ((slot_data->bus_width >= 4) && host->caps.has_rwproof) {
   2260		mmc->caps |= MMC_CAP_4_BIT_DATA;
   2261		if (slot_data->bus_width >= 8)
   2262			mmc->caps |= MMC_CAP_8_BIT_DATA;
   2263	}
   2264
   2265	if (atmci_get_version(host) < 0x200) {
   2266		mmc->max_segs = 256;
   2267		mmc->max_blk_size = 4095;
   2268		mmc->max_blk_count = 256;
   2269		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
   2270		mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
   2271	} else {
   2272		mmc->max_segs = 64;
   2273		mmc->max_req_size = 32768 * 512;
   2274		mmc->max_blk_size = 32768;
   2275		mmc->max_blk_count = 512;
   2276	}
   2277
   2278	/* Assume card is present initially */
   2279	set_bit(ATMCI_CARD_PRESENT, &slot->flags);
   2280	if (gpio_is_valid(slot->detect_pin)) {
   2281		if (devm_gpio_request(&host->pdev->dev, slot->detect_pin,
   2282				      "mmc_detect")) {
   2283			dev_dbg(&mmc->class_dev, "no detect pin available\n");
   2284			slot->detect_pin = -EBUSY;
   2285		} else if (gpio_get_value(slot->detect_pin) ^
   2286				slot->detect_is_active_high) {
   2287			clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
   2288		}
   2289	}
   2290
   2291	if (!gpio_is_valid(slot->detect_pin)) {
   2292		if (slot_data->non_removable)
   2293			mmc->caps |= MMC_CAP_NONREMOVABLE;
   2294		else
   2295			mmc->caps |= MMC_CAP_NEEDS_POLL;
   2296	}
   2297
   2298	if (gpio_is_valid(slot->wp_pin)) {
   2299		if (devm_gpio_request(&host->pdev->dev, slot->wp_pin,
   2300				      "mmc_wp")) {
   2301			dev_dbg(&mmc->class_dev, "no WP pin available\n");
   2302			slot->wp_pin = -EBUSY;
   2303		}
   2304	}
   2305
   2306	host->slot[id] = slot;
   2307	mmc_regulator_get_supply(mmc);
   2308	mmc_add_host(mmc);
   2309
   2310	if (gpio_is_valid(slot->detect_pin)) {
   2311		int ret;
   2312
   2313		timer_setup(&slot->detect_timer, atmci_detect_change, 0);
   2314
   2315		ret = request_irq(gpio_to_irq(slot->detect_pin),
   2316				atmci_detect_interrupt,
   2317				IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
   2318				"mmc-detect", slot);
   2319		if (ret) {
   2320			dev_dbg(&mmc->class_dev,
   2321				"could not request IRQ %d for detect pin\n",
   2322				gpio_to_irq(slot->detect_pin));
   2323			slot->detect_pin = -EBUSY;
   2324		}
   2325	}
   2326
   2327	atmci_init_debugfs(slot);
   2328
   2329	return 0;
   2330}
   2331
   2332static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
   2333		unsigned int id)
   2334{
   2335	/* Debugfs stuff is cleaned up by mmc core */
   2336
   2337	set_bit(ATMCI_SHUTDOWN, &slot->flags);
   2338	smp_wmb();
   2339
   2340	mmc_remove_host(slot->mmc);
   2341
   2342	if (gpio_is_valid(slot->detect_pin)) {
   2343		int pin = slot->detect_pin;
   2344
   2345		free_irq(gpio_to_irq(pin), slot);
   2346		del_timer_sync(&slot->detect_timer);
   2347	}
   2348
   2349	slot->host->slot[id] = NULL;
   2350	mmc_free_host(slot->mmc);
   2351}
   2352
   2353static int atmci_configure_dma(struct atmel_mci *host)
   2354{
   2355	host->dma.chan = dma_request_chan(&host->pdev->dev, "rxtx");
   2356
   2357	if (PTR_ERR(host->dma.chan) == -ENODEV) {
   2358		struct mci_platform_data *pdata = host->pdev->dev.platform_data;
   2359		dma_cap_mask_t mask;
   2360
   2361		if (!pdata || !pdata->dma_filter)
   2362			return -ENODEV;
   2363
   2364		dma_cap_zero(mask);
   2365		dma_cap_set(DMA_SLAVE, mask);
   2366
   2367		host->dma.chan = dma_request_channel(mask, pdata->dma_filter,
   2368						     pdata->dma_slave);
   2369		if (!host->dma.chan)
   2370			host->dma.chan = ERR_PTR(-ENODEV);
   2371	}
   2372
   2373	if (IS_ERR(host->dma.chan))
   2374		return PTR_ERR(host->dma.chan);
   2375
   2376	dev_info(&host->pdev->dev, "using %s for DMA transfers\n",
   2377		 dma_chan_name(host->dma.chan));
   2378
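	/*
	 * Template slave config: reads come from ATMCI_RDR and writes go to
	 * ATMCI_TDR, one 32-bit word per burst (maxburst of 1). The transfer
	 * direction is selected per request before the config is applied.
	 */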
   2379	host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
   2380	host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
   2381	host->dma_conf.src_maxburst = 1;
   2382	host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
   2383	host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
   2384	host->dma_conf.dst_maxburst = 1;
   2385	host->dma_conf.device_fc = false;
   2386
   2387	return 0;
   2388}
   2389
    2390/*
    2391 * The HSMCI (High Speed MCI) module is not fully compatible with the MCI
    2392 * module. HSMCI provides DMA support and a new configuration register, but
    2393 * no longer supports PDC.
    2394 */
   2395static void atmci_get_cap(struct atmel_mci *host)
   2396{
   2397	unsigned int version;
   2398
   2399	version = atmci_get_version(host);
   2400	dev_info(&host->pdev->dev,
   2401			"version: 0x%x\n", version);
   2402
   2403	host->caps.has_dma_conf_reg = false;
   2404	host->caps.has_pdc = true;
   2405	host->caps.has_cfg_reg = false;
   2406	host->caps.has_cstor_reg = false;
   2407	host->caps.has_highspeed = false;
   2408	host->caps.has_rwproof = false;
   2409	host->caps.has_odd_clk_div = false;
   2410	host->caps.has_bad_data_ordering = true;
   2411	host->caps.need_reset_after_xfer = true;
   2412	host->caps.need_blksz_mul_4 = true;
   2413	host->caps.need_notbusy_for_read_ops = false;
   2414
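	/*
	 * Capabilities are cumulative: each case below enables what its IP
	 * version adds and then falls through, so newer revisions inherit
	 * the features of all older ones.
	 */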
   2415	/* keep only major version number */
   2416	switch (version & 0xf00) {
   2417	case 0x600:
   2418	case 0x500:
   2419		host->caps.has_odd_clk_div = true;
   2420		fallthrough;
   2421	case 0x400:
   2422	case 0x300:
   2423		host->caps.has_dma_conf_reg = true;
   2424		host->caps.has_pdc = false;
   2425		host->caps.has_cfg_reg = true;
   2426		host->caps.has_cstor_reg = true;
   2427		host->caps.has_highspeed = true;
   2428		fallthrough;
   2429	case 0x200:
   2430		host->caps.has_rwproof = true;
   2431		host->caps.need_blksz_mul_4 = false;
   2432		host->caps.need_notbusy_for_read_ops = true;
   2433		fallthrough;
   2434	case 0x100:
   2435		host->caps.has_bad_data_ordering = false;
   2436		host->caps.need_reset_after_xfer = false;
   2437		fallthrough;
   2438	case 0x0:
   2439		break;
   2440	default:
   2441		host->caps.has_pdc = false;
   2442		dev_warn(&host->pdev->dev,
   2443				"Unmanaged mci version, set minimum capabilities\n");
   2444		break;
   2445	}
   2446}
   2447
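/*
 * For reference, a minimal sketch (illustrative values only, not taken from
 * any real board file) of the legacy platform data atmci_probe() accepts
 * when the device is not described in the device tree; a negative GPIO
 * number means "no such pin":
 *
 *	static struct mci_platform_data example_mci_pdata = {
 *		.slot[0] = {
 *			.bus_width	= 4,
 *			.detect_pin	= -1,
 *			.wp_pin		= -1,
 *		},
 *	};
 */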
   2448static int atmci_probe(struct platform_device *pdev)
   2449{
   2450	struct mci_platform_data	*pdata;
   2451	struct atmel_mci		*host;
   2452	struct resource			*regs;
   2453	unsigned int			nr_slots;
   2454	int				irq;
   2455	int				ret, i;
   2456
   2457	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   2458	if (!regs)
   2459		return -ENXIO;
   2460	pdata = pdev->dev.platform_data;
   2461	if (!pdata) {
   2462		pdata = atmci_of_init(pdev);
   2463		if (IS_ERR(pdata)) {
   2464			dev_err(&pdev->dev, "platform data not available\n");
   2465			return PTR_ERR(pdata);
   2466		}
   2467	}
   2468
   2469	irq = platform_get_irq(pdev, 0);
   2470	if (irq < 0)
   2471		return irq;
   2472
   2473	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
   2474	if (!host)
   2475		return -ENOMEM;
   2476
   2477	host->pdev = pdev;
   2478	spin_lock_init(&host->lock);
   2479	INIT_LIST_HEAD(&host->queue);
   2480
   2481	host->mck = devm_clk_get(&pdev->dev, "mci_clk");
   2482	if (IS_ERR(host->mck))
   2483		return PTR_ERR(host->mck);
   2484
   2485	host->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
   2486	if (!host->regs)
   2487		return -ENOMEM;
   2488
   2489	ret = clk_prepare_enable(host->mck);
   2490	if (ret)
   2491		return ret;
   2492
   2493	atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
   2494	host->bus_hz = clk_get_rate(host->mck);
   2495
   2496	host->mapbase = regs->start;
   2497
   2498	tasklet_setup(&host->tasklet, atmci_tasklet_func);
   2499
   2500	ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
   2501	if (ret) {
   2502		clk_disable_unprepare(host->mck);
   2503		return ret;
   2504	}
   2505
    2506	/* Get MCI capabilities and set operations accordingly */
   2507	atmci_get_cap(host);
   2508	ret = atmci_configure_dma(host);
   2509	if (ret == -EPROBE_DEFER)
   2510		goto err_dma_probe_defer;
   2511	if (ret == 0) {
   2512		host->prepare_data = &atmci_prepare_data_dma;
   2513		host->submit_data = &atmci_submit_data_dma;
   2514		host->stop_transfer = &atmci_stop_transfer_dma;
   2515	} else if (host->caps.has_pdc) {
   2516		dev_info(&pdev->dev, "using PDC\n");
   2517		host->prepare_data = &atmci_prepare_data_pdc;
   2518		host->submit_data = &atmci_submit_data_pdc;
   2519		host->stop_transfer = &atmci_stop_transfer_pdc;
   2520	} else {
   2521		dev_info(&pdev->dev, "using PIO\n");
   2522		host->prepare_data = &atmci_prepare_data;
   2523		host->submit_data = &atmci_submit_data;
   2524		host->stop_transfer = &atmci_stop_transfer;
   2525	}
   2526
   2527	platform_set_drvdata(pdev, host);
   2528
   2529	timer_setup(&host->timer, atmci_timeout_timer, 0);
   2530
   2531	pm_runtime_get_noresume(&pdev->dev);
   2532	pm_runtime_set_active(&pdev->dev);
   2533	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_DELAY);
   2534	pm_runtime_use_autosuspend(&pdev->dev);
   2535	pm_runtime_enable(&pdev->dev);
   2536
   2537	/* We need at least one slot to succeed */
   2538	nr_slots = 0;
   2539	ret = -ENODEV;
   2540	if (pdata->slot[0].bus_width) {
   2541		ret = atmci_init_slot(host, &pdata->slot[0],
   2542				0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
   2543		if (!ret) {
   2544			nr_slots++;
   2545			host->buf_size = host->slot[0]->mmc->max_req_size;
   2546		}
   2547	}
   2548	if (pdata->slot[1].bus_width) {
   2549		ret = atmci_init_slot(host, &pdata->slot[1],
   2550				1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
   2551		if (!ret) {
   2552			nr_slots++;
   2553			if (host->slot[1]->mmc->max_req_size > host->buf_size)
   2554				host->buf_size =
   2555					host->slot[1]->mmc->max_req_size;
   2556		}
   2557	}
   2558
   2559	if (!nr_slots) {
   2560		dev_err(&pdev->dev, "init failed: no slot defined\n");
   2561		goto err_init_slot;
   2562	}
   2563
   2564	if (!host->caps.has_rwproof) {
   2565		host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
   2566		                                  &host->buf_phys_addr,
   2567						  GFP_KERNEL);
   2568		if (!host->buffer) {
   2569			ret = -ENOMEM;
   2570			dev_err(&pdev->dev, "buffer allocation failed\n");
   2571			goto err_dma_alloc;
   2572		}
   2573	}
   2574
   2575	dev_info(&pdev->dev,
   2576			"Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
   2577			host->mapbase, irq, nr_slots);
   2578
   2579	pm_runtime_mark_last_busy(&host->pdev->dev);
   2580	pm_runtime_put_autosuspend(&pdev->dev);
   2581
   2582	return 0;
   2583
   2584err_dma_alloc:
   2585	for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
   2586		if (host->slot[i])
   2587			atmci_cleanup_slot(host->slot[i], i);
   2588	}
   2589err_init_slot:
   2590	clk_disable_unprepare(host->mck);
   2591
   2592	pm_runtime_disable(&pdev->dev);
   2593	pm_runtime_put_noidle(&pdev->dev);
   2594
   2595	del_timer_sync(&host->timer);
   2596	if (!IS_ERR(host->dma.chan))
   2597		dma_release_channel(host->dma.chan);
   2598err_dma_probe_defer:
   2599	free_irq(irq, host);
   2600	return ret;
   2601}
   2602
   2603static int atmci_remove(struct platform_device *pdev)
   2604{
   2605	struct atmel_mci	*host = platform_get_drvdata(pdev);
   2606	unsigned int		i;
   2607
   2608	pm_runtime_get_sync(&pdev->dev);
   2609
   2610	if (host->buffer)
   2611		dma_free_coherent(&pdev->dev, host->buf_size,
   2612		                  host->buffer, host->buf_phys_addr);
   2613
   2614	for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
   2615		if (host->slot[i])
   2616			atmci_cleanup_slot(host->slot[i], i);
   2617	}
   2618
   2619	atmci_writel(host, ATMCI_IDR, ~0UL);
   2620	atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
   2621	atmci_readl(host, ATMCI_SR);
   2622
   2623	del_timer_sync(&host->timer);
   2624	if (!IS_ERR(host->dma.chan))
   2625		dma_release_channel(host->dma.chan);
   2626
   2627	free_irq(platform_get_irq(pdev, 0), host);
   2628
   2629	clk_disable_unprepare(host->mck);
   2630
   2631	pm_runtime_disable(&pdev->dev);
   2632	pm_runtime_put_noidle(&pdev->dev);
   2633
   2634	return 0;
   2635}
   2636
   2637#ifdef CONFIG_PM
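/*
 * Runtime PM: gate the peripheral clock and move the pins to their sleep
 * state while the controller is idle; resume restores the default pin
 * state and re-enables the clock.
 */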
   2638static int atmci_runtime_suspend(struct device *dev)
   2639{
   2640	struct atmel_mci *host = dev_get_drvdata(dev);
   2641
   2642	clk_disable_unprepare(host->mck);
   2643
   2644	pinctrl_pm_select_sleep_state(dev);
   2645
   2646	return 0;
   2647}
   2648
   2649static int atmci_runtime_resume(struct device *dev)
   2650{
   2651	struct atmel_mci *host = dev_get_drvdata(dev);
   2652
   2653	pinctrl_select_default_state(dev);
   2654
   2655	return clk_prepare_enable(host->mck);
   2656}
   2657#endif
   2658
   2659static const struct dev_pm_ops atmci_dev_pm_ops = {
   2660	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
   2661				pm_runtime_force_resume)
   2662	SET_RUNTIME_PM_OPS(atmci_runtime_suspend, atmci_runtime_resume, NULL)
   2663};
   2664
   2665static struct platform_driver atmci_driver = {
   2666	.probe		= atmci_probe,
   2667	.remove		= atmci_remove,
   2668	.driver		= {
   2669		.name		= "atmel_mci",
   2670		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
   2671		.of_match_table	= of_match_ptr(atmci_dt_ids),
   2672		.pm		= &atmci_dev_pm_ops,
   2673	},
   2674};
   2675module_platform_driver(atmci_driver);
   2676
   2677MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
   2678MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
   2679MODULE_LICENSE("GPL v2");