cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

core.c (59436B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 *  linux/drivers/mmc/core/core.c
      4 *
      5 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
      6 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
      7 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
      8 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
      9 */
     10#include <linux/module.h>
     11#include <linux/init.h>
     12#include <linux/interrupt.h>
     13#include <linux/completion.h>
     14#include <linux/device.h>
     15#include <linux/delay.h>
     16#include <linux/pagemap.h>
     17#include <linux/err.h>
     18#include <linux/leds.h>
     19#include <linux/scatterlist.h>
     20#include <linux/log2.h>
     21#include <linux/pm_runtime.h>
     22#include <linux/pm_wakeup.h>
     23#include <linux/suspend.h>
     24#include <linux/fault-inject.h>
     25#include <linux/random.h>
     26#include <linux/slab.h>
     27#include <linux/of.h>
     28
     29#include <linux/mmc/card.h>
     30#include <linux/mmc/host.h>
     31#include <linux/mmc/mmc.h>
     32#include <linux/mmc/sd.h>
     33#include <linux/mmc/slot-gpio.h>
     34
     35#define CREATE_TRACE_POINTS
     36#include <trace/events/mmc.h>
     37
     38#include "core.h"
     39#include "card.h"
     40#include "crypto.h"
     41#include "bus.h"
     42#include "host.h"
     43#include "sdio_bus.h"
     44#include "pwrseq.h"
     45
     46#include "mmc_ops.h"
     47#include "sd_ops.h"
     48#include "sdio_ops.h"
     49
     50/* The max erase timeout, used when host->max_busy_timeout isn't specified */
     51#define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */
     52#define SD_DISCARD_TIMEOUT_MS	(250)
     53
     54static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
     55
     56/*
     57 * Enabling software CRCs on the data blocks can be a significant (30%)
     58 * performance cost, and for other reasons may not always be desired.
      59 * So we allow it to be disabled.
     60 */
     61bool use_spi_crc = 1;
     62module_param(use_spi_crc, bool, 0);
     63
     64static int mmc_schedule_delayed_work(struct delayed_work *work,
     65				     unsigned long delay)
     66{
     67	/*
      68	 * We use the system_freezable_wq for two reasons. First, it
      69	 * allows several work items (not the same one) to be executed
      70	 * simultaneously. Second, the queue becomes frozen when
     71	 * userspace becomes frozen during system PM.
     72	 */
     73	return queue_delayed_work(system_freezable_wq, work, delay);
     74}
     75
     76#ifdef CONFIG_FAIL_MMC_REQUEST
     77
     78/*
     79 * Internal function. Inject random data errors.
     80 * If mmc_data is NULL no errors are injected.
     81 */
     82static void mmc_should_fail_request(struct mmc_host *host,
     83				    struct mmc_request *mrq)
     84{
     85	struct mmc_command *cmd = mrq->cmd;
     86	struct mmc_data *data = mrq->data;
     87	static const int data_errors[] = {
     88		-ETIMEDOUT,
     89		-EILSEQ,
     90		-EIO,
     91	};
     92
     93	if (!data)
     94		return;
     95
     96	if ((cmd && cmd->error) || data->error ||
     97	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
     98		return;
     99
    100	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
    101	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
    102}
    103
    104#else /* CONFIG_FAIL_MMC_REQUEST */
    105
    106static inline void mmc_should_fail_request(struct mmc_host *host,
    107					   struct mmc_request *mrq)
    108{
    109}
    110
    111#endif /* CONFIG_FAIL_MMC_REQUEST */
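        /*
         * A hedged usage sketch: with CONFIG_FAIL_MMC_REQUEST and
         * CONFIG_FAULT_INJECTION_DEBUG_FS enabled, the standard fault-injection
         * attributes are exposed per host under debugfs, so something like
         *
         *   echo 10 > /sys/kernel/debug/mmc0/fail_mmc_request/probability
         *   echo -1 > /sys/kernel/debug/mmc0/fail_mmc_request/times
         *
         * should make roughly 10% of data requests fail with one of the errors
         * from data_errors[] above (paths and values assumed, not verified here).
         */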
    112
    113static inline void mmc_complete_cmd(struct mmc_request *mrq)
    114{
    115	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
    116		complete_all(&mrq->cmd_completion);
    117}
    118
    119void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
    120{
    121	if (!mrq->cap_cmd_during_tfr)
    122		return;
    123
    124	mmc_complete_cmd(mrq);
    125
    126	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
    127		 mmc_hostname(host), mrq->cmd->opcode);
    128}
    129EXPORT_SYMBOL(mmc_command_done);
    130
    131/**
    132 *	mmc_request_done - finish processing an MMC request
    133 *	@host: MMC host which completed request
     134 *	@mrq: MMC request which completed
    135 *
    136 *	MMC drivers should call this function when they have completed
    137 *	their processing of a request.
    138 */
    139void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
    140{
    141	struct mmc_command *cmd = mrq->cmd;
    142	int err = cmd->error;
    143
    144	/* Flag re-tuning needed on CRC errors */
    145	if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
    146	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
    147	    !host->retune_crc_disable &&
    148	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
    149	    (mrq->data && mrq->data->error == -EILSEQ) ||
    150	    (mrq->stop && mrq->stop->error == -EILSEQ)))
    151		mmc_retune_needed(host);
    152
    153	if (err && cmd->retries && mmc_host_is_spi(host)) {
    154		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
    155			cmd->retries = 0;
    156	}
    157
    158	if (host->ongoing_mrq == mrq)
    159		host->ongoing_mrq = NULL;
    160
    161	mmc_complete_cmd(mrq);
    162
    163	trace_mmc_request_done(host, mrq);
    164
    165	/*
    166	 * We list various conditions for the command to be considered
    167	 * properly done:
    168	 *
    169	 * - There was no error, OK fine then
    170	 * - We are not doing some kind of retry
    171	 * - The card was removed (...so just complete everything no matter
    172	 *   if there are errors or retries)
    173	 */
    174	if (!err || !cmd->retries || mmc_card_removed(host->card)) {
    175		mmc_should_fail_request(host, mrq);
    176
    177		if (!host->ongoing_mrq)
    178			led_trigger_event(host->led, LED_OFF);
    179
    180		if (mrq->sbc) {
    181			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
    182				mmc_hostname(host), mrq->sbc->opcode,
    183				mrq->sbc->error,
    184				mrq->sbc->resp[0], mrq->sbc->resp[1],
    185				mrq->sbc->resp[2], mrq->sbc->resp[3]);
    186		}
    187
    188		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
    189			mmc_hostname(host), cmd->opcode, err,
    190			cmd->resp[0], cmd->resp[1],
    191			cmd->resp[2], cmd->resp[3]);
    192
    193		if (mrq->data) {
    194			pr_debug("%s:     %d bytes transferred: %d\n",
    195				mmc_hostname(host),
    196				mrq->data->bytes_xfered, mrq->data->error);
    197		}
    198
    199		if (mrq->stop) {
    200			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
    201				mmc_hostname(host), mrq->stop->opcode,
    202				mrq->stop->error,
    203				mrq->stop->resp[0], mrq->stop->resp[1],
    204				mrq->stop->resp[2], mrq->stop->resp[3]);
    205		}
    206	}
    207	/*
    208	 * Request starter must handle retries - see
    209	 * mmc_wait_for_req_done().
    210	 */
    211	if (mrq->done)
    212		mrq->done(mrq);
    213}
    214
    215EXPORT_SYMBOL(mmc_request_done);
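        /*
         * A minimal sketch (not part of this file) of how a host driver's IRQ
         * handler hands a completed request back to the core via
         * mmc_request_done(). The foo_* names are hypothetical.
         */
        #if 0
        static irqreturn_t foo_irq(int irq, void *dev_id)
        {
        	struct foo_host *foo = dev_id;
        	struct mmc_request *mrq = foo->mrq;

        	foo->mrq = NULL;
        	mrq->cmd->error = foo_check_crc(foo) ? -EILSEQ : 0;
        	mmc_request_done(foo->mmc, mrq);	/* core logs, retries, calls mrq->done() */

        	return IRQ_HANDLED;
        }
        #endif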
    216
    217static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
    218{
    219	int err;
    220
    221	/* Assumes host controller has been runtime resumed by mmc_claim_host */
    222	err = mmc_retune(host);
    223	if (err) {
    224		mrq->cmd->error = err;
    225		mmc_request_done(host, mrq);
    226		return;
    227	}
    228
    229	/*
     230	 * For SDIO rw commands we must wait while the card signals
     231	 * busy, otherwise some SDIO devices won't work properly.
     232	 * I/O abort, reset and bus suspend operations skip this wait.
    233	 */
    234	if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
    235	    host->ops->card_busy) {
     236		int tries = 500; /* Wait approx. 500 ms at maximum */
    237
    238		while (host->ops->card_busy(host) && --tries)
    239			mmc_delay(1);
    240
    241		if (tries == 0) {
    242			mrq->cmd->error = -EBUSY;
    243			mmc_request_done(host, mrq);
    244			return;
    245		}
    246	}
    247
    248	if (mrq->cap_cmd_during_tfr) {
    249		host->ongoing_mrq = mrq;
    250		/*
     251		 * Retry path could come through here without having waited on
    252		 * cmd_completion, so ensure it is reinitialised.
    253		 */
    254		reinit_completion(&mrq->cmd_completion);
    255	}
    256
    257	trace_mmc_request_start(host, mrq);
    258
    259	if (host->cqe_on)
    260		host->cqe_ops->cqe_off(host);
    261
    262	host->ops->request(host, mrq);
    263}
    264
    265static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
    266			     bool cqe)
    267{
    268	if (mrq->sbc) {
    269		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
    270			 mmc_hostname(host), mrq->sbc->opcode,
    271			 mrq->sbc->arg, mrq->sbc->flags);
    272	}
    273
    274	if (mrq->cmd) {
    275		pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
    276			 mmc_hostname(host), cqe ? "CQE direct " : "",
    277			 mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
    278	} else if (cqe) {
    279		pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
    280			 mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
    281	}
    282
    283	if (mrq->data) {
    284		pr_debug("%s:     blksz %d blocks %d flags %08x "
    285			"tsac %d ms nsac %d\n",
    286			mmc_hostname(host), mrq->data->blksz,
    287			mrq->data->blocks, mrq->data->flags,
    288			mrq->data->timeout_ns / 1000000,
    289			mrq->data->timeout_clks);
    290	}
    291
    292	if (mrq->stop) {
    293		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
    294			 mmc_hostname(host), mrq->stop->opcode,
    295			 mrq->stop->arg, mrq->stop->flags);
    296	}
    297}
    298
    299static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
    300{
    301	unsigned int i, sz = 0;
    302	struct scatterlist *sg;
    303
    304	if (mrq->cmd) {
    305		mrq->cmd->error = 0;
    306		mrq->cmd->mrq = mrq;
    307		mrq->cmd->data = mrq->data;
    308	}
    309	if (mrq->sbc) {
    310		mrq->sbc->error = 0;
    311		mrq->sbc->mrq = mrq;
    312	}
    313	if (mrq->data) {
    314		if (mrq->data->blksz > host->max_blk_size ||
    315		    mrq->data->blocks > host->max_blk_count ||
    316		    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
    317			return -EINVAL;
    318
    319		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
    320			sz += sg->length;
    321		if (sz != mrq->data->blocks * mrq->data->blksz)
    322			return -EINVAL;
    323
    324		mrq->data->error = 0;
    325		mrq->data->mrq = mrq;
    326		if (mrq->stop) {
    327			mrq->data->stop = mrq->stop;
    328			mrq->stop->error = 0;
    329			mrq->stop->mrq = mrq;
    330		}
    331	}
    332
    333	return 0;
    334}
    335
    336int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
    337{
    338	int err;
    339
    340	init_completion(&mrq->cmd_completion);
    341
    342	mmc_retune_hold(host);
    343
    344	if (mmc_card_removed(host->card))
    345		return -ENOMEDIUM;
    346
    347	mmc_mrq_pr_debug(host, mrq, false);
    348
    349	WARN_ON(!host->claimed);
    350
    351	err = mmc_mrq_prep(host, mrq);
    352	if (err)
    353		return err;
    354
    355	led_trigger_event(host->led, LED_FULL);
    356	__mmc_start_request(host, mrq);
    357
    358	return 0;
    359}
    360EXPORT_SYMBOL(mmc_start_request);
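        /*
         * An illustrative sketch (not part of this file) of building a request
         * that passes the mmc_mrq_prep() checks above (scatterlist total ==
         * blocks * blksz) and submitting it; foo_read_one_block() is a
         * hypothetical helper in the style of mmc_test.
         */
        #if 0
        static int foo_read_one_block(struct mmc_card *card, void *buf, u32 blk_addr)
        {
        	struct mmc_request mrq = {};
        	struct mmc_command cmd = {};
        	struct mmc_data data = {};
        	struct scatterlist sg;

        	cmd.opcode = MMC_READ_SINGLE_BLOCK;
        	cmd.arg = blk_addr;
        	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        	sg_init_one(&sg, buf, 512);
        	data.sg = &sg;
        	data.sg_len = 1;
        	data.blksz = 512;
        	data.blocks = 1;	/* 1 * 512 matches the sg total, so prep succeeds */
        	data.flags = MMC_DATA_READ;

        	mrq.cmd = &cmd;
        	mrq.data = &data;

        	mmc_set_data_timeout(&data, card);
        	mmc_wait_for_req(card->host, &mrq);	/* host must be claimed */

        	return cmd.error ? cmd.error : data.error;
        }
        #endif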
    361
    362static void mmc_wait_done(struct mmc_request *mrq)
    363{
    364	complete(&mrq->completion);
    365}
    366
    367static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
    368{
    369	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
    370
    371	/*
    372	 * If there is an ongoing transfer, wait for the command line to become
    373	 * available.
    374	 */
    375	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
    376		wait_for_completion(&ongoing_mrq->cmd_completion);
    377}
    378
    379static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
    380{
    381	int err;
    382
    383	mmc_wait_ongoing_tfr_cmd(host);
    384
    385	init_completion(&mrq->completion);
    386	mrq->done = mmc_wait_done;
    387
    388	err = mmc_start_request(host, mrq);
    389	if (err) {
    390		mrq->cmd->error = err;
    391		mmc_complete_cmd(mrq);
    392		complete(&mrq->completion);
    393	}
    394
    395	return err;
    396}
    397
    398void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
    399{
    400	struct mmc_command *cmd;
    401
    402	while (1) {
    403		wait_for_completion(&mrq->completion);
    404
    405		cmd = mrq->cmd;
    406
    407		if (!cmd->error || !cmd->retries ||
    408		    mmc_card_removed(host->card))
    409			break;
    410
    411		mmc_retune_recheck(host);
    412
    413		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
    414			 mmc_hostname(host), cmd->opcode, cmd->error);
    415		cmd->retries--;
    416		cmd->error = 0;
    417		__mmc_start_request(host, mrq);
    418	}
    419
    420	mmc_retune_release(host);
    421}
    422EXPORT_SYMBOL(mmc_wait_for_req_done);
    423
    424/*
    425 * mmc_cqe_start_req - Start a CQE request.
    426 * @host: MMC host to start the request
    427 * @mrq: request to start
    428 *
    429 * Start the request, re-tuning if needed and it is possible. Returns an error
    430 * code if the request fails to start or -EBUSY if CQE is busy.
    431 */
    432int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
    433{
    434	int err;
    435
    436	/*
    437	 * CQE cannot process re-tuning commands. Caller must hold retuning
    438	 * while CQE is in use.  Re-tuning can happen here only when CQE has no
    439	 * active requests i.e. this is the first.  Note, re-tuning will call
    440	 * ->cqe_off().
    441	 */
    442	err = mmc_retune(host);
    443	if (err)
    444		goto out_err;
    445
    446	mrq->host = host;
    447
    448	mmc_mrq_pr_debug(host, mrq, true);
    449
    450	err = mmc_mrq_prep(host, mrq);
    451	if (err)
    452		goto out_err;
    453
    454	err = host->cqe_ops->cqe_request(host, mrq);
    455	if (err)
    456		goto out_err;
    457
    458	trace_mmc_request_start(host, mrq);
    459
    460	return 0;
    461
    462out_err:
    463	if (mrq->cmd) {
    464		pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
    465			 mmc_hostname(host), mrq->cmd->opcode, err);
    466	} else {
    467		pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
    468			 mmc_hostname(host), mrq->tag, err);
    469	}
    470	return err;
    471}
    472EXPORT_SYMBOL(mmc_cqe_start_req);
    473
    474/**
    475 *	mmc_cqe_request_done - CQE has finished processing an MMC request
    476 *	@host: MMC host which completed request
    477 *	@mrq: MMC request which completed
    478 *
    479 *	CQE drivers should call this function when they have completed
    480 *	their processing of a request.
    481 */
    482void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
    483{
    484	mmc_should_fail_request(host, mrq);
    485
    486	/* Flag re-tuning needed on CRC errors */
    487	if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
    488	    (mrq->data && mrq->data->error == -EILSEQ))
    489		mmc_retune_needed(host);
    490
    491	trace_mmc_request_done(host, mrq);
    492
    493	if (mrq->cmd) {
    494		pr_debug("%s: CQE req done (direct CMD%u): %d\n",
    495			 mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
    496	} else {
    497		pr_debug("%s: CQE transfer done tag %d\n",
    498			 mmc_hostname(host), mrq->tag);
    499	}
    500
    501	if (mrq->data) {
    502		pr_debug("%s:     %d bytes transferred: %d\n",
    503			 mmc_hostname(host),
    504			 mrq->data->bytes_xfered, mrq->data->error);
    505	}
    506
    507	mrq->done(mrq);
    508}
    509EXPORT_SYMBOL(mmc_cqe_request_done);
    510
    511/**
    512 *	mmc_cqe_post_req - CQE post process of a completed MMC request
    513 *	@host: MMC host
    514 *	@mrq: MMC request to be processed
    515 */
    516void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
    517{
    518	if (host->cqe_ops->cqe_post_req)
    519		host->cqe_ops->cqe_post_req(host, mrq);
    520}
    521EXPORT_SYMBOL(mmc_cqe_post_req);
    522
    523/* Arbitrary 1 second timeout */
    524#define MMC_CQE_RECOVERY_TIMEOUT	1000
    525
    526/*
    527 * mmc_cqe_recovery - Recover from CQE errors.
    528 * @host: MMC host to recover
    529 *
     530 * Recovery consists of stopping CQE, stopping eMMC, discarding the queue
     531 * in eMMC, and discarding the queue in CQE. CQE must call
    532 * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
    533 * fails to discard its queue.
    534 */
    535int mmc_cqe_recovery(struct mmc_host *host)
    536{
    537	struct mmc_command cmd;
    538	int err;
    539
    540	mmc_retune_hold_now(host);
    541
    542	/*
    543	 * Recovery is expected seldom, if at all, but it reduces performance,
    544	 * so make sure it is not completely silent.
    545	 */
    546	pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
    547
    548	host->cqe_ops->cqe_recovery_start(host);
    549
    550	memset(&cmd, 0, sizeof(cmd));
    551	cmd.opcode       = MMC_STOP_TRANSMISSION;
    552	cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC;
    553	cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
    554	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
    555	mmc_wait_for_cmd(host, &cmd, 0);
    556
    557	memset(&cmd, 0, sizeof(cmd));
    558	cmd.opcode       = MMC_CMDQ_TASK_MGMT;
    559	cmd.arg          = 1; /* Discard entire queue */
    560	cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC;
    561	cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
    562	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
    563	err = mmc_wait_for_cmd(host, &cmd, 0);
    564
    565	host->cqe_ops->cqe_recovery_finish(host);
    566
    567	mmc_retune_release(host);
    568
    569	return err;
    570}
    571EXPORT_SYMBOL(mmc_cqe_recovery);
    572
    573/**
    574 *	mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
    575 *	@host: MMC host
    576 *	@mrq: MMC request
    577 *
    578 *	mmc_is_req_done() is used with requests that have
    579 *	mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
    580 *	starting a request and before waiting for it to complete. That is,
     581 *	either in between calls to mmc_start_request(), or after mmc_wait_for_req()
    582 *	and before mmc_wait_for_req_done(). If it is called at other times the
    583 *	result is not meaningful.
    584 */
    585bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
    586{
    587	return completion_done(&mrq->completion);
    588}
    589EXPORT_SYMBOL(mmc_is_req_done);
    590
    591/**
    592 *	mmc_wait_for_req - start a request and wait for completion
    593 *	@host: MMC host to start command
    594 *	@mrq: MMC request to start
    595 *
    596 *	Start a new MMC custom command request for a host, and wait
    597 *	for the command to complete. In the case of 'cap_cmd_during_tfr'
    598 *	requests, the transfer is ongoing and the caller can issue further
    599 *	commands that do not use the data lines, and then wait by calling
    600 *	mmc_wait_for_req_done().
    601 *	Does not attempt to parse the response.
    602 */
    603void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
    604{
    605	__mmc_start_req(host, mrq);
    606
    607	if (!mrq->cap_cmd_during_tfr)
    608		mmc_wait_for_req_done(host, mrq);
    609}
    610EXPORT_SYMBOL(mmc_wait_for_req);
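        /*
         * Sketch of the 'cap_cmd_during_tfr' flow described above (hypothetical
         * caller): start the request, optionally issue commands that avoid the
         * data lines while the transfer is ongoing, then wait for the transfer.
         */
        #if 0
        	mrq->cap_cmd_during_tfr = true;
        	mmc_wait_for_req(host, mrq);		/* returns once the CMD line is free */
        	if (!mmc_is_req_done(host, mrq))
        		foo_query_status(host);		/* e.g. CMD13, uses no data lines */
        	mmc_wait_for_req_done(host, mrq);	/* now wait for the data transfer */
        #endif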
    611
    612/**
    613 *	mmc_wait_for_cmd - start a command and wait for completion
    614 *	@host: MMC host to start command
    615 *	@cmd: MMC command to start
    616 *	@retries: maximum number of retries
    617 *
    618 *	Start a new MMC command for a host, and wait for the command
    619 *	to complete.  Return any error that occurred while the command
    620 *	was executing.  Do not attempt to parse the response.
    621 */
    622int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
    623{
    624	struct mmc_request mrq = {};
    625
    626	WARN_ON(!host->claimed);
    627
    628	memset(cmd->resp, 0, sizeof(cmd->resp));
    629	cmd->retries = retries;
    630
    631	mrq.cmd = cmd;
    632	cmd->data = NULL;
    633
    634	mmc_wait_for_req(host, &mrq);
    635
    636	return cmd->error;
    637}
    638
    639EXPORT_SYMBOL(mmc_wait_for_cmd);
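        /*
         * Sketch: issuing a bare SEND_STATUS (CMD13) through mmc_wait_for_cmd(),
         * assuming the host is already claimed. This mirrors the pattern in
         * mmc_ops.c; treat it as illustrative, not authoritative.
         */
        #if 0
        	struct mmc_command cmd = {};
        	int err;

        	cmd.opcode = MMC_SEND_STATUS;
        	cmd.arg = card->rca << 16;
        	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

        	err = mmc_wait_for_cmd(card->host, &cmd, 3);	/* up to 3 retries */
        	if (!err)
        		pr_debug("status %#x\n", cmd.resp[0]);
        #endif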
    640
    641/**
    642 *	mmc_set_data_timeout - set the timeout for a data command
    643 *	@data: data phase for command
    644 *	@card: the MMC card associated with the data transfer
    645 *
    646 *	Computes the data timeout parameters according to the
    647 *	correct algorithm given the card type.
    648 */
    649void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
    650{
    651	unsigned int mult;
    652
    653	/*
    654	 * SDIO cards only define an upper 1 s limit on access.
    655	 */
    656	if (mmc_card_sdio(card)) {
    657		data->timeout_ns = 1000000000;
    658		data->timeout_clks = 0;
    659		return;
    660	}
    661
    662	/*
    663	 * SD cards use a 100 multiplier rather than 10
    664	 */
    665	mult = mmc_card_sd(card) ? 100 : 10;
    666
    667	/*
    668	 * Scale up the multiplier (and therefore the timeout) by
    669	 * the r2w factor for writes.
    670	 */
    671	if (data->flags & MMC_DATA_WRITE)
    672		mult <<= card->csd.r2w_factor;
    673
    674	data->timeout_ns = card->csd.taac_ns * mult;
    675	data->timeout_clks = card->csd.taac_clks * mult;
    676
    677	/*
    678	 * SD cards also have an upper limit on the timeout.
    679	 */
    680	if (mmc_card_sd(card)) {
    681		unsigned int timeout_us, limit_us;
    682
    683		timeout_us = data->timeout_ns / 1000;
    684		if (card->host->ios.clock)
    685			timeout_us += data->timeout_clks * 1000 /
    686				(card->host->ios.clock / 1000);
    687
    688		if (data->flags & MMC_DATA_WRITE)
    689			/*
     690			 * The MMC spec says "It is strongly recommended
    691			 * for hosts to implement more than 500ms
    692			 * timeout value even if the card indicates
    693			 * the 250ms maximum busy length."  Even the
    694			 * previous value of 300ms is known to be
    695			 * insufficient for some cards.
    696			 */
    697			limit_us = 3000000;
    698		else
    699			limit_us = 100000;
    700
    701		/*
    702		 * SDHC cards always use these fixed values.
    703		 */
    704		if (timeout_us > limit_us) {
    705			data->timeout_ns = limit_us * 1000;
    706			data->timeout_clks = 0;
    707		}
    708
    709		/* assign limit value if invalid */
    710		if (timeout_us == 0)
    711			data->timeout_ns = limit_us * 1000;
    712	}
    713
    714	/*
    715	 * Some cards require longer data read timeout than indicated in CSD.
    716	 * Address this by setting the read timeout to a "reasonably high"
    717	 * value. For the cards tested, 600ms has proven enough. If necessary,
    718	 * this value can be increased if other problematic cards require this.
    719	 */
    720	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
    721		data->timeout_ns = 600000000;
    722		data->timeout_clks = 0;
    723	}
    724
    725	/*
    726	 * Some cards need very high timeouts if driven in SPI mode.
    727	 * The worst observed timeout was 900ms after writing a
    728	 * continuous stream of data until the internal logic
    729	 * overflowed.
    730	 */
    731	if (mmc_host_is_spi(card->host)) {
    732		if (data->flags & MMC_DATA_WRITE) {
    733			if (data->timeout_ns < 1000000000)
    734				data->timeout_ns = 1000000000;	/* 1s */
    735		} else {
    736			if (data->timeout_ns < 100000000)
    737				data->timeout_ns =  100000000;	/* 100ms */
    738		}
    739	}
    740}
    741EXPORT_SYMBOL(mmc_set_data_timeout);
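        /*
         * Worked example with assumed CSD values: an SD card with taac_ns =
         * 1000000 (1 ms), taac_clks = 0 and r2w_factor = 2. A read uses mult =
         * 100, giving timeout_ns = 100 ms; a write uses mult = 100 << 2 = 400,
         * giving 400 ms, which is under the 3 s write cap (limit_us) and stands.
         */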
    742
    743/*
    744 * Allow claiming an already claimed host if the context is the same or there is
    745 * no context but the task is the same.
    746 */
    747static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
    748				   struct task_struct *task)
    749{
    750	return host->claimer == ctx ||
    751	       (!ctx && task && host->claimer->task == task);
    752}
    753
    754static inline void mmc_ctx_set_claimer(struct mmc_host *host,
    755				       struct mmc_ctx *ctx,
    756				       struct task_struct *task)
    757{
    758	if (!host->claimer) {
    759		if (ctx)
    760			host->claimer = ctx;
    761		else
    762			host->claimer = &host->default_ctx;
    763	}
    764	if (task)
    765		host->claimer->task = task;
    766}
    767
    768/**
    769 *	__mmc_claim_host - exclusively claim a host
    770 *	@host: mmc host to claim
    771 *	@ctx: context that claims the host or NULL in which case the default
    772 *	context will be used
    773 *	@abort: whether or not the operation should be aborted
    774 *
     775 *	Claim a host for a set of operations.  If @abort is non-NULL and
     776 *	dereferences to a non-zero value, this returns prematurely with
     777 *	that non-zero value without acquiring the lock.  Otherwise it
     778 *	returns zero with the lock held.
    779 */
    780int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
    781		     atomic_t *abort)
    782{
    783	struct task_struct *task = ctx ? NULL : current;
    784	DECLARE_WAITQUEUE(wait, current);
    785	unsigned long flags;
    786	int stop;
    787	bool pm = false;
    788
    789	might_sleep();
    790
    791	add_wait_queue(&host->wq, &wait);
    792	spin_lock_irqsave(&host->lock, flags);
    793	while (1) {
    794		set_current_state(TASK_UNINTERRUPTIBLE);
    795		stop = abort ? atomic_read(abort) : 0;
    796		if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
    797			break;
    798		spin_unlock_irqrestore(&host->lock, flags);
    799		schedule();
    800		spin_lock_irqsave(&host->lock, flags);
    801	}
    802	set_current_state(TASK_RUNNING);
    803	if (!stop) {
    804		host->claimed = 1;
    805		mmc_ctx_set_claimer(host, ctx, task);
    806		host->claim_cnt += 1;
    807		if (host->claim_cnt == 1)
    808			pm = true;
    809	} else
    810		wake_up(&host->wq);
    811	spin_unlock_irqrestore(&host->lock, flags);
    812	remove_wait_queue(&host->wq, &wait);
    813
    814	if (pm)
    815		pm_runtime_get_sync(mmc_dev(host));
    816
    817	return stop;
    818}
    819EXPORT_SYMBOL(__mmc_claim_host);
    820
    821/**
    822 *	mmc_release_host - release a host
    823 *	@host: mmc host to release
    824 *
     825 *	Release an MMC host, allowing others to claim the host
    826 *	for their operations.
    827 */
    828void mmc_release_host(struct mmc_host *host)
    829{
    830	unsigned long flags;
    831
    832	WARN_ON(!host->claimed);
    833
    834	spin_lock_irqsave(&host->lock, flags);
    835	if (--host->claim_cnt) {
    836		/* Release for nested claim */
    837		spin_unlock_irqrestore(&host->lock, flags);
    838	} else {
    839		host->claimed = 0;
    840		host->claimer->task = NULL;
    841		host->claimer = NULL;
    842		spin_unlock_irqrestore(&host->lock, flags);
    843		wake_up(&host->wq);
    844		pm_runtime_mark_last_busy(mmc_dev(host));
    845		if (host->caps & MMC_CAP_SYNC_RUNTIME_PM)
    846			pm_runtime_put_sync_suspend(mmc_dev(host));
    847		else
    848			pm_runtime_put_autosuspend(mmc_dev(host));
    849	}
    850}
    851EXPORT_SYMBOL(mmc_release_host);
    852
    853/*
    854 * This is a helper function, which fetches a runtime pm reference for the
    855 * card device and also claims the host.
    856 */
    857void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
    858{
    859	pm_runtime_get_sync(&card->dev);
    860	__mmc_claim_host(card->host, ctx, NULL);
    861}
    862EXPORT_SYMBOL(mmc_get_card);
    863
    864/*
    865 * This is a helper function, which releases the host and drops the runtime
    866 * pm reference for the card device.
    867 */
    868void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
    869{
    870	struct mmc_host *host = card->host;
    871
    872	WARN_ON(ctx && host->claimer != ctx);
    873
    874	mmc_release_host(host);
    875	pm_runtime_mark_last_busy(&card->dev);
    876	pm_runtime_put_autosuspend(&card->dev);
    877}
    878EXPORT_SYMBOL(mmc_put_card);
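        /*
         * Sketch of the intended pairing: mmc_get_card()/mmc_put_card() bracket
         * an operation so the card stays runtime-resumed and the host stays
         * claimed for its duration. foo_do_op() is hypothetical.
         */
        #if 0
        	mmc_get_card(card, NULL);	/* pm_runtime_get_sync + claim host */
        	err = foo_do_op(card);
        	mmc_put_card(card, NULL);	/* release host + allow autosuspend */
        #endif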
    879
    880/*
    881 * Internal function that does the actual ios call to the host driver,
    882 * optionally printing some debug output.
    883 */
    884static inline void mmc_set_ios(struct mmc_host *host)
    885{
    886	struct mmc_ios *ios = &host->ios;
    887
    888	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
    889		"width %u timing %u\n",
    890		 mmc_hostname(host), ios->clock, ios->bus_mode,
    891		 ios->power_mode, ios->chip_select, ios->vdd,
    892		 1 << ios->bus_width, ios->timing);
    893
    894	host->ops->set_ios(host, ios);
    895}
    896
    897/*
    898 * Control chip select pin on a host.
    899 */
    900void mmc_set_chip_select(struct mmc_host *host, int mode)
    901{
    902	host->ios.chip_select = mode;
    903	mmc_set_ios(host);
    904}
    905
    906/*
    907 * Sets the host clock to the highest possible frequency that
    908 * is below "hz".
    909 */
    910void mmc_set_clock(struct mmc_host *host, unsigned int hz)
    911{
    912	WARN_ON(hz && hz < host->f_min);
    913
    914	if (hz > host->f_max)
    915		hz = host->f_max;
    916
    917	host->ios.clock = hz;
    918	mmc_set_ios(host);
    919}
    920
    921int mmc_execute_tuning(struct mmc_card *card)
    922{
    923	struct mmc_host *host = card->host;
    924	u32 opcode;
    925	int err;
    926
    927	if (!host->ops->execute_tuning)
    928		return 0;
    929
    930	if (host->cqe_on)
    931		host->cqe_ops->cqe_off(host);
    932
    933	if (mmc_card_mmc(card))
    934		opcode = MMC_SEND_TUNING_BLOCK_HS200;
    935	else
    936		opcode = MMC_SEND_TUNING_BLOCK;
    937
    938	err = host->ops->execute_tuning(host, opcode);
    939	if (!err) {
    940		mmc_retune_clear(host);
    941		mmc_retune_enable(host);
    942		return 0;
    943	}
    944
    945	/* Only print error when we don't check for card removal */
    946	if (!host->detect_change)
    947		pr_err("%s: tuning execution failed: %d\n",
    948			mmc_hostname(host), err);
    949
    950	return err;
    951}
    952
    953/*
    954 * Change the bus mode (open drain/push-pull) of a host.
    955 */
    956void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
    957{
    958	host->ios.bus_mode = mode;
    959	mmc_set_ios(host);
    960}
    961
    962/*
    963 * Change data bus width of a host.
    964 */
    965void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
    966{
    967	host->ios.bus_width = width;
    968	mmc_set_ios(host);
    969}
    970
    971/*
    972 * Set initial state after a power cycle or a hw_reset.
    973 */
    974void mmc_set_initial_state(struct mmc_host *host)
    975{
    976	if (host->cqe_on)
    977		host->cqe_ops->cqe_off(host);
    978
    979	mmc_retune_disable(host);
    980
    981	if (mmc_host_is_spi(host))
    982		host->ios.chip_select = MMC_CS_HIGH;
    983	else
    984		host->ios.chip_select = MMC_CS_DONTCARE;
    985	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
    986	host->ios.bus_width = MMC_BUS_WIDTH_1;
    987	host->ios.timing = MMC_TIMING_LEGACY;
    988	host->ios.drv_type = 0;
    989	host->ios.enhanced_strobe = false;
    990
    991	/*
    992	 * Make sure we are in non-enhanced strobe mode before we
    993	 * actually enable it in ext_csd.
    994	 */
    995	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
    996	     host->ops->hs400_enhanced_strobe)
    997		host->ops->hs400_enhanced_strobe(host, &host->ios);
    998
    999	mmc_set_ios(host);
   1000
   1001	mmc_crypto_set_initial_state(host);
   1002}
   1003
   1004/**
   1005 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
   1006 * @vdd:	voltage (mV)
   1007 * @low_bits:	prefer low bits in boundary cases
   1008 *
   1009 * This function returns the OCR bit number according to the provided @vdd
    1010 * value. If conversion is not possible a negative errno value is returned.
   1011 *
   1012 * Depending on the @low_bits flag the function prefers low or high OCR bits
   1013 * on boundary voltages. For example,
   1014 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
   1015 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
   1016 *
   1017 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
   1018 */
   1019static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
   1020{
   1021	const int max_bit = ilog2(MMC_VDD_35_36);
   1022	int bit;
   1023
   1024	if (vdd < 1650 || vdd > 3600)
   1025		return -EINVAL;
   1026
   1027	if (vdd >= 1650 && vdd <= 1950)
   1028		return ilog2(MMC_VDD_165_195);
   1029
   1030	if (low_bits)
   1031		vdd -= 1;
   1032
   1033	/* Base 2000 mV, step 100 mV, bit's base 8. */
   1034	bit = (vdd - 2000) / 100 + 8;
   1035	if (bit > max_bit)
   1036		return max_bit;
   1037	return bit;
   1038}
   1039
   1040/**
   1041 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
   1042 * @vdd_min:	minimum voltage value (mV)
   1043 * @vdd_max:	maximum voltage value (mV)
   1044 *
   1045 * This function returns the OCR mask bits according to the provided @vdd_min
   1046 * and @vdd_max values. If conversion is not possible the function returns 0.
   1047 *
   1048 * Notes wrt boundary cases:
   1049 * This function sets the OCR bits for all boundary voltages, for example
   1050 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
   1051 * MMC_VDD_34_35 mask.
   1052 */
   1053u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
   1054{
   1055	u32 mask = 0;
   1056
   1057	if (vdd_max < vdd_min)
   1058		return 0;
   1059
   1060	/* Prefer high bits for the boundary vdd_max values. */
   1061	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
   1062	if (vdd_max < 0)
   1063		return 0;
   1064
   1065	/* Prefer low bits for the boundary vdd_min values. */
   1066	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
   1067	if (vdd_min < 0)
   1068		return 0;
   1069
   1070	/* Fill the mask, from max bit to min bit. */
   1071	while (vdd_max >= vdd_min)
   1072		mask |= 1 << vdd_max--;
   1073
   1074	return mask;
   1075}
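        /*
         * Worked example: mmc_vddrange_to_ocrmask(3300, 3400). vdd_max = 3400
         * prefers the high bit: (3400 - 2000) / 100 + 8 = 22. vdd_min = 3300
         * prefers the low bit: (3299 - 2000) / 100 + 8 = 20. Filling bits 20..22
         * yields MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35, as documented.
         */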
   1076
   1077static int mmc_of_get_func_num(struct device_node *node)
   1078{
   1079	u32 reg;
   1080	int ret;
   1081
   1082	ret = of_property_read_u32(node, "reg", &reg);
   1083	if (ret < 0)
   1084		return ret;
   1085
   1086	return reg;
   1087}
   1088
   1089struct device_node *mmc_of_find_child_device(struct mmc_host *host,
   1090		unsigned func_num)
   1091{
   1092	struct device_node *node;
   1093
   1094	if (!host->parent || !host->parent->of_node)
   1095		return NULL;
   1096
   1097	for_each_child_of_node(host->parent->of_node, node) {
   1098		if (mmc_of_get_func_num(node) == func_num)
   1099			return node;
   1100	}
   1101
   1102	return NULL;
   1103}
   1104
   1105/*
   1106 * Mask off any voltages we don't support and select
   1107 * the lowest voltage
   1108 */
   1109u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
   1110{
   1111	int bit;
   1112
   1113	/*
   1114	 * Sanity check the voltages that the card claims to
   1115	 * support.
   1116	 */
   1117	if (ocr & 0x7F) {
   1118		dev_warn(mmc_dev(host),
   1119		"card claims to support voltages below defined range\n");
   1120		ocr &= ~0x7F;
   1121	}
   1122
   1123	ocr &= host->ocr_avail;
   1124	if (!ocr) {
   1125		dev_warn(mmc_dev(host), "no support for card's volts\n");
   1126		return 0;
   1127	}
   1128
   1129	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
   1130		bit = ffs(ocr) - 1;
   1131		ocr &= 3 << bit;
   1132		mmc_power_cycle(host, ocr);
   1133	} else {
   1134		bit = fls(ocr) - 1;
   1135		ocr &= 3 << bit;
   1136		if (bit != host->ios.vdd)
   1137			dev_warn(mmc_dev(host), "exceeding card's volts\n");
   1138	}
   1139
   1140	return ocr;
   1141}
   1142
   1143int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
   1144{
   1145	int err = 0;
   1146	int old_signal_voltage = host->ios.signal_voltage;
   1147
   1148	host->ios.signal_voltage = signal_voltage;
   1149	if (host->ops->start_signal_voltage_switch)
   1150		err = host->ops->start_signal_voltage_switch(host, &host->ios);
   1151
   1152	if (err)
   1153		host->ios.signal_voltage = old_signal_voltage;
   1154
   1155	return err;
   1156
   1157}
   1158
   1159void mmc_set_initial_signal_voltage(struct mmc_host *host)
   1160{
   1161	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
   1162	if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
   1163		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
   1164	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
   1165		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
   1166	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
   1167		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
   1168}
   1169
   1170int mmc_host_set_uhs_voltage(struct mmc_host *host)
   1171{
   1172	u32 clock;
   1173
   1174	/*
   1175	 * During a signal voltage level switch, the clock must be gated
   1176	 * for 5 ms according to the SD spec
   1177	 */
   1178	clock = host->ios.clock;
   1179	host->ios.clock = 0;
   1180	mmc_set_ios(host);
   1181
   1182	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
   1183		return -EAGAIN;
   1184
   1185	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
   1186	mmc_delay(10);
   1187	host->ios.clock = clock;
   1188	mmc_set_ios(host);
   1189
   1190	return 0;
   1191}
   1192
   1193int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
   1194{
   1195	struct mmc_command cmd = {};
   1196	int err = 0;
   1197
   1198	/*
   1199	 * If we cannot switch voltages, return failure so the caller
   1200	 * can continue without UHS mode
   1201	 */
   1202	if (!host->ops->start_signal_voltage_switch)
   1203		return -EPERM;
   1204	if (!host->ops->card_busy)
   1205		pr_warn("%s: cannot verify signal voltage switch\n",
   1206			mmc_hostname(host));
   1207
   1208	cmd.opcode = SD_SWITCH_VOLTAGE;
   1209	cmd.arg = 0;
   1210	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
   1211
   1212	err = mmc_wait_for_cmd(host, &cmd, 0);
   1213	if (err)
   1214		goto power_cycle;
   1215
   1216	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
   1217		return -EIO;
   1218
   1219	/*
   1220	 * The card should drive cmd and dat[0:3] low immediately
   1221	 * after the response of cmd11, but wait 1 ms to be sure
   1222	 */
   1223	mmc_delay(1);
   1224	if (host->ops->card_busy && !host->ops->card_busy(host)) {
   1225		err = -EAGAIN;
   1226		goto power_cycle;
   1227	}
   1228
   1229	if (mmc_host_set_uhs_voltage(host)) {
   1230		/*
   1231		 * Voltages may not have been switched, but we've already
   1232		 * sent CMD11, so a power cycle is required anyway
   1233		 */
   1234		err = -EAGAIN;
   1235		goto power_cycle;
   1236	}
   1237
   1238	/* Wait for at least 1 ms according to spec */
   1239	mmc_delay(1);
   1240
   1241	/*
   1242	 * Failure to switch is indicated by the card holding
   1243	 * dat[0:3] low
   1244	 */
   1245	if (host->ops->card_busy && host->ops->card_busy(host))
   1246		err = -EAGAIN;
   1247
   1248power_cycle:
   1249	if (err) {
   1250		pr_debug("%s: Signal voltage switch failed, "
   1251			"power cycling card\n", mmc_hostname(host));
   1252		mmc_power_cycle(host, ocr);
   1253	}
   1254
   1255	return err;
   1256}
   1257
   1258/*
   1259 * Select timing parameters for host.
   1260 */
   1261void mmc_set_timing(struct mmc_host *host, unsigned int timing)
   1262{
   1263	host->ios.timing = timing;
   1264	mmc_set_ios(host);
   1265}
   1266
   1267/*
   1268 * Select appropriate driver type for host.
   1269 */
   1270void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
   1271{
   1272	host->ios.drv_type = drv_type;
   1273	mmc_set_ios(host);
   1274}
   1275
   1276int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
   1277			      int card_drv_type, int *drv_type)
   1278{
   1279	struct mmc_host *host = card->host;
   1280	int host_drv_type = SD_DRIVER_TYPE_B;
   1281
   1282	*drv_type = 0;
   1283
   1284	if (!host->ops->select_drive_strength)
   1285		return 0;
   1286
   1287	/* Use SD definition of driver strength for hosts */
   1288	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
   1289		host_drv_type |= SD_DRIVER_TYPE_A;
   1290
   1291	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
   1292		host_drv_type |= SD_DRIVER_TYPE_C;
   1293
   1294	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
   1295		host_drv_type |= SD_DRIVER_TYPE_D;
   1296
   1297	/*
   1298	 * The drive strength that the hardware can support
   1299	 * depends on the board design.  Pass the appropriate
   1300	 * information and let the hardware specific code
   1301	 * return what is possible given the options
   1302	 */
   1303	return host->ops->select_drive_strength(card, max_dtr,
   1304						host_drv_type,
   1305						card_drv_type,
   1306						drv_type);
   1307}
   1308
   1309/*
   1310 * Apply power to the MMC stack.  This is a two-stage process.
   1311 * First, we enable power to the card without the clock running.
   1312 * We then wait a bit for the power to stabilise.  Finally,
   1313 * enable the bus drivers and clock to the card.
   1314 *
    1315 * We must _NOT_ enable the clock prior to power stabilising.
   1316 *
   1317 * If a host does all the power sequencing itself, ignore the
   1318 * initial MMC_POWER_UP stage.
   1319 */
   1320void mmc_power_up(struct mmc_host *host, u32 ocr)
   1321{
   1322	if (host->ios.power_mode == MMC_POWER_ON)
   1323		return;
   1324
   1325	mmc_pwrseq_pre_power_on(host);
   1326
   1327	host->ios.vdd = fls(ocr) - 1;
   1328	host->ios.power_mode = MMC_POWER_UP;
   1329	/* Set initial state and call mmc_set_ios */
   1330	mmc_set_initial_state(host);
   1331
   1332	mmc_set_initial_signal_voltage(host);
   1333
   1334	/*
   1335	 * This delay should be sufficient to allow the power supply
   1336	 * to reach the minimum voltage.
   1337	 */
   1338	mmc_delay(host->ios.power_delay_ms);
   1339
   1340	mmc_pwrseq_post_power_on(host);
   1341
   1342	host->ios.clock = host->f_init;
   1343
   1344	host->ios.power_mode = MMC_POWER_ON;
   1345	mmc_set_ios(host);
   1346
   1347	/*
    1348	 * This delay must be at least 74 clock cycles, or 1 ms, or the
   1349	 * time required to reach a stable voltage.
   1350	 */
   1351	mmc_delay(host->ios.power_delay_ms);
   1352}
   1353
   1354void mmc_power_off(struct mmc_host *host)
   1355{
   1356	if (host->ios.power_mode == MMC_POWER_OFF)
   1357		return;
   1358
   1359	mmc_pwrseq_power_off(host);
   1360
   1361	host->ios.clock = 0;
   1362	host->ios.vdd = 0;
   1363
   1364	host->ios.power_mode = MMC_POWER_OFF;
   1365	/* Set initial state and call mmc_set_ios */
   1366	mmc_set_initial_state(host);
   1367
   1368	/*
   1369	 * Some configurations, such as the 802.11 SDIO card in the OLPC
   1370	 * XO-1.5, require a short delay after poweroff before the card
   1371	 * can be successfully turned on again.
   1372	 */
   1373	mmc_delay(1);
   1374}
   1375
   1376void mmc_power_cycle(struct mmc_host *host, u32 ocr)
   1377{
   1378	mmc_power_off(host);
   1379	/* Wait at least 1 ms according to SD spec */
   1380	mmc_delay(1);
   1381	mmc_power_up(host, ocr);
   1382}
   1383
   1384/*
   1385 * Assign a mmc bus handler to a host. Only one bus handler may control a
   1386 * host at any given time.
   1387 */
   1388void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
   1389{
   1390	host->bus_ops = ops;
   1391}
   1392
   1393/*
   1394 * Remove the current bus handler from a host.
   1395 */
   1396void mmc_detach_bus(struct mmc_host *host)
   1397{
   1398	host->bus_ops = NULL;
   1399}
   1400
   1401void _mmc_detect_change(struct mmc_host *host, unsigned long delay, bool cd_irq)
   1402{
   1403	/*
   1404	 * Prevent system sleep for 5s to allow user space to consume the
    1405	 * corresponding uevent. This is especially useful when CD irq is used
   1406	 * as a system wakeup, but doesn't hurt in other cases.
   1407	 */
   1408	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL))
   1409		__pm_wakeup_event(host->ws, 5000);
   1410
   1411	host->detect_change = 1;
   1412	mmc_schedule_delayed_work(&host->detect, delay);
   1413}
   1414
   1415/**
    1416 *	mmc_detect_change - process a change of state on an MMC socket
   1417 *	@host: host which changed state.
   1418 *	@delay: optional delay to wait before detection (jiffies)
   1419 *
   1420 *	MMC drivers should call this when they detect a card has been
   1421 *	inserted or removed. The MMC layer will confirm that any
   1422 *	present card is still functional, and initialize any newly
    1423 *	inserted card.
   1424 */
   1425void mmc_detect_change(struct mmc_host *host, unsigned long delay)
   1426{
   1427	_mmc_detect_change(host, delay, true);
   1428}
   1429EXPORT_SYMBOL(mmc_detect_change);
   1430
   1431void mmc_init_erase(struct mmc_card *card)
   1432{
   1433	unsigned int sz;
   1434
   1435	if (is_power_of_2(card->erase_size))
   1436		card->erase_shift = ffs(card->erase_size) - 1;
   1437	else
   1438		card->erase_shift = 0;
   1439
   1440	/*
   1441	 * It is possible to erase an arbitrarily large area of an SD or MMC
   1442	 * card.  That is not desirable because it can take a long time
   1443	 * (minutes) potentially delaying more important I/O, and also the
    1444	 * timeout calculations become increasingly over-estimated.
   1445	 * Consequently, 'pref_erase' is defined as a guide to limit erases
   1446	 * to that size and alignment.
   1447	 *
   1448	 * For SD cards that define Allocation Unit size, limit erases to one
   1449	 * Allocation Unit at a time.
    1450	 * For MMC, have a stab at a good value and for modern cards it will
   1451	 * end up being 4MiB. Note that if the value is too small, it can end
   1452	 * up taking longer to erase. Also note, erase_size is already set to
   1453	 * High Capacity Erase Size if available when this function is called.
   1454	 */
   1455	if (mmc_card_sd(card) && card->ssr.au) {
   1456		card->pref_erase = card->ssr.au;
   1457		card->erase_shift = ffs(card->ssr.au) - 1;
   1458	} else if (card->erase_size) {
   1459		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
   1460		if (sz < 128)
   1461			card->pref_erase = 512 * 1024 / 512;
   1462		else if (sz < 512)
   1463			card->pref_erase = 1024 * 1024 / 512;
   1464		else if (sz < 1024)
   1465			card->pref_erase = 2 * 1024 * 1024 / 512;
   1466		else
   1467			card->pref_erase = 4 * 1024 * 1024 / 512;
   1468		if (card->pref_erase < card->erase_size)
   1469			card->pref_erase = card->erase_size;
   1470		else {
   1471			sz = card->pref_erase % card->erase_size;
   1472			if (sz)
   1473				card->pref_erase += card->erase_size - sz;
   1474		}
   1475	} else
   1476		card->pref_erase = 0;
   1477}
   1478
   1479static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
   1480				          unsigned int arg, unsigned int qty)
   1481{
   1482	unsigned int erase_timeout;
   1483
   1484	if (arg == MMC_DISCARD_ARG ||
   1485	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
   1486		erase_timeout = card->ext_csd.trim_timeout;
   1487	} else if (card->ext_csd.erase_group_def & 1) {
   1488		/* High Capacity Erase Group Size uses HC timeouts */
   1489		if (arg == MMC_TRIM_ARG)
   1490			erase_timeout = card->ext_csd.trim_timeout;
   1491		else
   1492			erase_timeout = card->ext_csd.hc_erase_timeout;
   1493	} else {
   1494		/* CSD Erase Group Size uses write timeout */
   1495		unsigned int mult = (10 << card->csd.r2w_factor);
   1496		unsigned int timeout_clks = card->csd.taac_clks * mult;
   1497		unsigned int timeout_us;
   1498
   1499		/* Avoid overflow: e.g. taac_ns=80000000 mult=1280 */
   1500		if (card->csd.taac_ns < 1000000)
   1501			timeout_us = (card->csd.taac_ns * mult) / 1000;
   1502		else
   1503			timeout_us = (card->csd.taac_ns / 1000) * mult;
   1504
   1505		/*
   1506		 * ios.clock is only a target.  The real clock rate might be
   1507		 * less but not that much less, so fudge it by multiplying by 2.
   1508		 */
   1509		timeout_clks <<= 1;
   1510		timeout_us += (timeout_clks * 1000) /
   1511			      (card->host->ios.clock / 1000);
   1512
   1513		erase_timeout = timeout_us / 1000;
   1514
   1515		/*
   1516		 * Theoretically, the calculation could underflow so round up
   1517		 * to 1ms in that case.
   1518		 */
   1519		if (!erase_timeout)
   1520			erase_timeout = 1;
   1521	}
   1522
   1523	/* Multiplier for secure operations */
   1524	if (arg & MMC_SECURE_ARGS) {
   1525		if (arg == MMC_SECURE_ERASE_ARG)
   1526			erase_timeout *= card->ext_csd.sec_erase_mult;
   1527		else
   1528			erase_timeout *= card->ext_csd.sec_trim_mult;
   1529	}
   1530
   1531	erase_timeout *= qty;
   1532
   1533	/*
   1534	 * Ensure at least a 1 second timeout for SPI as per
   1535	 * 'mmc_set_data_timeout()'
   1536	 */
   1537	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
   1538		erase_timeout = 1000;
   1539
   1540	return erase_timeout;
   1541}
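        /*
         * Worked example for the CSD-timeout branch (illustrative numbers, per
         * the overflow comment above): taac_ns = 80000000 with r2w_factor = 7
         * gives mult = 10 << 7 = 1280, so timeout_us = (80000000 / 1000) * 1280
         * = 102400000, i.e. roughly 102 s per erase group before the clock term.
         */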
   1542
   1543static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
   1544					 unsigned int arg,
   1545					 unsigned int qty)
   1546{
   1547	unsigned int erase_timeout;
   1548
    1549	/* For DISCARD none of the calculation below applies.
    1550	 * The busy timeout is 250 ms per discard command.
   1551	 */
   1552	if (arg == SD_DISCARD_ARG)
   1553		return SD_DISCARD_TIMEOUT_MS;
   1554
   1555	if (card->ssr.erase_timeout) {
   1556		/* Erase timeout specified in SD Status Register (SSR) */
   1557		erase_timeout = card->ssr.erase_timeout * qty +
   1558				card->ssr.erase_offset;
   1559	} else {
   1560		/*
   1561		 * Erase timeout not specified in SD Status Register (SSR) so
   1562		 * use 250ms per write block.
   1563		 */
   1564		erase_timeout = 250 * qty;
   1565	}
   1566
   1567	/* Must not be less than 1 second */
   1568	if (erase_timeout < 1000)
   1569		erase_timeout = 1000;
   1570
   1571	return erase_timeout;
   1572}
   1573
   1574static unsigned int mmc_erase_timeout(struct mmc_card *card,
   1575				      unsigned int arg,
   1576				      unsigned int qty)
   1577{
   1578	if (mmc_card_sd(card))
   1579		return mmc_sd_erase_timeout(card, arg, qty);
   1580	else
   1581		return mmc_mmc_erase_timeout(card, arg, qty);
   1582}
   1583
   1584static int mmc_do_erase(struct mmc_card *card, unsigned int from,
   1585			unsigned int to, unsigned int arg)
   1586{
   1587	struct mmc_command cmd = {};
   1588	unsigned int qty = 0, busy_timeout = 0;
   1589	bool use_r1b_resp;
   1590	int err;
   1591
   1592	mmc_retune_hold(card->host);
   1593
   1594	/*
   1595	 * qty is used to calculate the erase timeout which depends on how many
   1596	 * erase groups (or allocation units in SD terminology) are affected.
   1597	 * We count erasing part of an erase group as one erase group.
   1598	 * For SD, the allocation units are always a power of 2.  For MMC, the
    1599	 * erase group size is almost certainly also a power of 2, but the JEDEC
    1600	 * standard does not seem to insist on that, so we fall back to
   1601	 * division in that case.  SD may not specify an allocation unit size,
   1602	 * in which case the timeout is based on the number of write blocks.
   1603	 *
   1604	 * Note that the timeout for secure trim 2 will only be correct if the
   1605	 * number of erase groups specified is the same as the total of all
   1606	 * preceding secure trim 1 commands.  Since the power may have been
   1607	 * lost since the secure trim 1 commands occurred, it is generally
   1608	 * impossible to calculate the secure trim 2 timeout correctly.
   1609	 */
   1610	if (card->erase_shift)
   1611		qty += ((to >> card->erase_shift) -
   1612			(from >> card->erase_shift)) + 1;
   1613	else if (mmc_card_sd(card))
   1614		qty += to - from + 1;
   1615	else
   1616		qty += ((to / card->erase_size) -
   1617			(from / card->erase_size)) + 1;
   1618
   1619	if (!mmc_card_blockaddr(card)) {
   1620		from <<= 9;
   1621		to <<= 9;
   1622	}
   1623
   1624	if (mmc_card_sd(card))
   1625		cmd.opcode = SD_ERASE_WR_BLK_START;
   1626	else
   1627		cmd.opcode = MMC_ERASE_GROUP_START;
   1628	cmd.arg = from;
   1629	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
   1630	err = mmc_wait_for_cmd(card->host, &cmd, 0);
   1631	if (err) {
   1632		pr_err("mmc_erase: group start error %d, "
   1633		       "status %#x\n", err, cmd.resp[0]);
   1634		err = -EIO;
   1635		goto out;
   1636	}
   1637
   1638	memset(&cmd, 0, sizeof(struct mmc_command));
   1639	if (mmc_card_sd(card))
   1640		cmd.opcode = SD_ERASE_WR_BLK_END;
   1641	else
   1642		cmd.opcode = MMC_ERASE_GROUP_END;
   1643	cmd.arg = to;
   1644	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
   1645	err = mmc_wait_for_cmd(card->host, &cmd, 0);
   1646	if (err) {
   1647		pr_err("mmc_erase: group end error %d, status %#x\n",
   1648		       err, cmd.resp[0]);
   1649		err = -EIO;
   1650		goto out;
   1651	}
   1652
   1653	memset(&cmd, 0, sizeof(struct mmc_command));
   1654	cmd.opcode = MMC_ERASE;
   1655	cmd.arg = arg;
   1656	busy_timeout = mmc_erase_timeout(card, arg, qty);
   1657	use_r1b_resp = mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout);
   1658
   1659	err = mmc_wait_for_cmd(card->host, &cmd, 0);
   1660	if (err) {
   1661		pr_err("mmc_erase: erase error %d, status %#x\n",
   1662		       err, cmd.resp[0]);
   1663		err = -EIO;
   1664		goto out;
   1665	}
   1666
   1667	if (mmc_host_is_spi(card->host))
   1668		goto out;
   1669
   1670	/*
    1671	 * When R1B + MMC_CAP_WAIT_WHILE_BUSY is used, polling
    1672	 * shall be avoided.
   1673	 */
   1674	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
   1675		goto out;
   1676
   1677	/* Let's poll to find out when the erase operation completes. */
   1678	err = mmc_poll_for_busy(card, busy_timeout, false, MMC_BUSY_ERASE);
   1679
   1680out:
   1681	mmc_retune_release(card->host);
   1682	return err;
   1683}
   1684
   1685static unsigned int mmc_align_erase_size(struct mmc_card *card,
   1686					 unsigned int *from,
   1687					 unsigned int *to,
   1688					 unsigned int nr)
   1689{
   1690	unsigned int from_new = *from, nr_new = nr, rem;
   1691
   1692	/*
   1693	 * When the 'card->erase_size' is power of 2, we can use round_up/down()
   1694	 * to align the erase size efficiently.
   1695	 */
   1696	if (is_power_of_2(card->erase_size)) {
   1697		unsigned int temp = from_new;
   1698
   1699		from_new = round_up(temp, card->erase_size);
   1700		rem = from_new - temp;
   1701
   1702		if (nr_new > rem)
   1703			nr_new -= rem;
   1704		else
   1705			return 0;
   1706
   1707		nr_new = round_down(nr_new, card->erase_size);
   1708	} else {
   1709		rem = from_new % card->erase_size;
   1710		if (rem) {
   1711			rem = card->erase_size - rem;
   1712			from_new += rem;
   1713			if (nr_new > rem)
   1714				nr_new -= rem;
   1715			else
   1716				return 0;
   1717		}
   1718
   1719		rem = nr_new % card->erase_size;
   1720		if (rem)
   1721			nr_new -= rem;
   1722	}
   1723
   1724	if (nr_new == 0)
   1725		return 0;
   1726
   1727	*to = from_new + nr_new;
   1728	*from = from_new;
   1729
   1730	return nr_new;
   1731}
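        /*
         * Worked example: with erase_size = 1024 (a power of 2), a request of
         * from = 1000, nr = 5000 becomes from_new = round_up(1000, 1024) = 1024
         * (rem = 24), nr_new = 4976 rounded down to 4096; so *from = 1024,
         * *to = 5120 and 4096 is returned, leaving the unaligned edges untouched.
         */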
   1732
   1733/**
   1734 * mmc_erase - erase sectors.
   1735 * @card: card to erase
   1736 * @from: first sector to erase
   1737 * @nr: number of sectors to erase
   1738 * @arg: erase command argument
   1739 *
   1740 * Caller must claim host before calling this function.
   1741 */
   1742int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
   1743	      unsigned int arg)
   1744{
   1745	unsigned int rem, to = from + nr;
   1746	int err;
   1747
   1748	if (!(card->csd.cmdclass & CCC_ERASE))
   1749		return -EOPNOTSUPP;
   1750
   1751	if (!card->erase_size)
   1752		return -EOPNOTSUPP;
   1753
   1754	if (mmc_card_sd(card) && arg != SD_ERASE_ARG && arg != SD_DISCARD_ARG)
   1755		return -EOPNOTSUPP;
   1756
   1757	if (mmc_card_mmc(card) && (arg & MMC_SECURE_ARGS) &&
   1758	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
   1759		return -EOPNOTSUPP;
   1760
   1761	if (mmc_card_mmc(card) && (arg & MMC_TRIM_ARGS) &&
   1762	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
   1763		return -EOPNOTSUPP;
   1764
   1765	if (arg == MMC_SECURE_ERASE_ARG) {
   1766		if (from % card->erase_size || nr % card->erase_size)
   1767			return -EINVAL;
   1768	}
   1769
   1770	if (arg == MMC_ERASE_ARG)
   1771		nr = mmc_align_erase_size(card, &from, &to, nr);
   1772
   1773	if (nr == 0)
   1774		return 0;
   1775
   1776	if (to <= from)
   1777		return -EINVAL;
   1778
   1779	/* 'from' and 'to' are inclusive */
   1780	to -= 1;
   1781
   1782	/*
   1783	 * Special case where only one erase-group fits in the timeout budget:
   1784	 * If the region crosses an erase-group boundary in this case, we
   1785	 * would be trimming more than one erase-group, which does not fit
   1786	 * in the controller's timeout budget, so we need to split the region
   1787	 * and call mmc_do_erase() twice if necessary. This special case is
   1788	 * identified by the card->eg_boundary flag.
   1789	 */
   1790	rem = card->erase_size - (from % card->erase_size);
   1791	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
   1792		err = mmc_do_erase(card, from, from + rem - 1, arg);
   1793		from += rem;
   1794		if ((err) || (to <= from))
   1795			return err;
   1796	}
   1797
   1798	return mmc_do_erase(card, from, to, arg);
   1799}
   1800EXPORT_SYMBOL(mmc_erase);
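
       /*
        * A minimal usage sketch (hypothetical caller, not part of this file):
        * upper layers such as the block driver's discard path do roughly the
        * following, with the host claimed around the call:
        *
        *	mmc_claim_host(card->host);
        *	err = mmc_erase(card, from, nr, mmc_can_trim(card) ?
        *			MMC_TRIM_ARG : MMC_ERASE_ARG);
        *	mmc_release_host(card->host);
        */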
   1801
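       /**
        * mmc_can_erase - check whether the card supports the erase command
        * @card: card to check
        *
        * Returns 1 if the card advertises the erase command class and a
        * non-zero erase size, 0 otherwise.
        */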
   1802int mmc_can_erase(struct mmc_card *card)
   1803{
   1804	if (card->csd.cmdclass & CCC_ERASE && card->erase_size)
   1805		return 1;
   1806	return 0;
   1807}
   1808EXPORT_SYMBOL(mmc_can_erase);
   1809
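       /**
        * mmc_can_trim - check whether the card supports TRIM
        * @card: card to check
        *
        * Returns 1 if the card advertises TRIM support and is not flagged
        * with the MMC_QUIRK_TRIM_BROKEN quirk, 0 otherwise.
        */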
   1810int mmc_can_trim(struct mmc_card *card)
   1811{
   1812	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
   1813	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
   1814		return 1;
   1815	return 0;
   1816}
   1817EXPORT_SYMBOL(mmc_can_trim);
   1818
   1819int mmc_can_discard(struct mmc_card *card)
   1820{
   1821	/*
   1822	 * As there's no way to detect the discard support bit at v4.5,
   1823	 * use the s/w feature support field instead.
   1824	 */
   1825	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
   1826		return 1;
   1827	return 0;
   1828}
   1829EXPORT_SYMBOL(mmc_can_discard);
   1830
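       /*
        * mmc_can_sanitize - check whether the card supports SANITIZE
        * @card: card to check
        *
        * Sanitize only makes sense if the card can trim or erase, and it must
        * also advertise the sanitize feature in EXT_CSD.
        */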
   1831int mmc_can_sanitize(struct mmc_card *card)
   1832{
   1833	if (!mmc_can_trim(card) && !mmc_can_erase(card))
   1834		return 0;
   1835	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
   1836		return 1;
   1837	return 0;
   1838}
   1839
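       /**
        * mmc_can_secure_erase_trim - check for secure erase/trim support
        * @card: card to check
        *
        * Returns 1 if the card advertises secure erase/trim support and is
        * not flagged with MMC_QUIRK_SEC_ERASE_TRIM_BROKEN, 0 otherwise.
        */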
   1840int mmc_can_secure_erase_trim(struct mmc_card *card)
   1841{
   1842	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
   1843	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
   1844		return 1;
   1845	return 0;
   1846}
   1847EXPORT_SYMBOL(mmc_can_secure_erase_trim);
   1848
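       /**
        * mmc_erase_group_aligned - check whether a range is erase-group aligned
        * @card: card to check against
        * @from: first sector
        * @nr: number of sectors
        *
        * Returns 1 if both the start and the length are multiples of
        * 'card->erase_size', 0 otherwise.
        */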
   1849int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
   1850			    unsigned int nr)
   1851{
   1852	if (!card->erase_size)
   1853		return 0;
   1854	if (from % card->erase_size || nr % card->erase_size)
   1855		return 0;
   1856	return 1;
   1857}
   1858EXPORT_SYMBOL(mmc_erase_group_aligned);
   1859
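       /*
        * mmc_do_calc_max_discard - find the max discard size for one erase arg
        * @card: card to calculate for
        * @arg: erase argument (MMC_ERASE_ARG, MMC_TRIM_ARG, ...)
        *
        * Probes erase-group quantities in growing powers of two and accepts
        * the largest quantity whose timeout still fits within the host's busy
        * timeout budget, then converts that quantity back to sectors.
        */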
   1860static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
   1861					    unsigned int arg)
   1862{
   1863	struct mmc_host *host = card->host;
   1864	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
   1865	unsigned int last_timeout = 0;
   1866	unsigned int max_busy_timeout = host->max_busy_timeout ?
   1867			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
   1868
   1869	if (card->erase_shift) {
   1870		max_qty = UINT_MAX >> card->erase_shift;
   1871		min_qty = card->pref_erase >> card->erase_shift;
   1872	} else if (mmc_card_sd(card)) {
   1873		max_qty = UINT_MAX;
   1874		min_qty = card->pref_erase;
   1875	} else {
   1876		max_qty = UINT_MAX / card->erase_size;
   1877		min_qty = card->pref_erase / card->erase_size;
   1878	}
   1879
   1880	/*
   1881	 * 'host->max_busy_timeout' should not be the only limit when deciding
   1882	 * the max discard sectors. We want a balanced value that improves the
   1883	 * erase speed without running into an overly long timeout at the
   1884	 * same time.
   1885	 *
   1886	 * Here we take 'card->pref_erase' as the minimum number of discard
   1887	 * sectors regardless of the size of 'host->max_busy_timeout', but if
   1888	 * 'host->max_busy_timeout' is large enough for more discard sectors,
   1889	 * then we can continue to increase the max discard sectors until we
   1890	 * reach a balanced value. In cases when 'host->max_busy_timeout'
   1891	 * isn't specified, use the default max erase timeout.
   1892	 */
   1893	do {
   1894		y = 0;
   1895		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
   1896			timeout = mmc_erase_timeout(card, arg, qty + x);
   1897
   1898			if (qty + x > min_qty && timeout > max_busy_timeout)
   1899				break;
   1900
   1901			if (timeout < last_timeout)
   1902				break;
   1903			last_timeout = timeout;
   1904			y = x;
   1905		}
   1906		qty += y;
   1907	} while (y);
   1908
   1909	if (!qty)
   1910		return 0;
   1911
   1912	/*
   1913	 * When specifying a sector range to trim, chances are we might cross
   1914	 * an erase-group boundary even if the amount of sectors is less than
   1915	 * one erase-group.
   1916	 * If we can only fit one erase-group in the controller timeout budget,
   1917	 * we have to care that erase-group boundaries are not crossed by a
   1918	 * single trim operation. We flag that special case with "eg_boundary".
   1919	 * In all other cases we can just decrement qty and pretend that we
   1920	 * always touch (qty + 1) erase-groups as a simple optimization.
   1921	 */
   1922	if (qty == 1)
   1923		card->eg_boundary = 1;
   1924	else
   1925		qty--;
   1926
   1927	/* Convert qty to sectors */
   1928	if (card->erase_shift)
   1929		max_discard = qty << card->erase_shift;
   1930	else if (mmc_card_sd(card))
   1931		max_discard = qty + 1;
   1932	else
   1933		max_discard = qty * card->erase_size;
   1934
   1935	return max_discard;
   1936}
   1937
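       /**
        * mmc_calc_max_discard - longest discard, in sectors, the card can handle
        * @card: card to calculate for
        *
        * Takes the smaller of the erase and trim limits (when trim is
        * supported), so a single discard request never exceeds the timeout
        * budget regardless of which operation the block layer maps it to.
        */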
   1938unsigned int mmc_calc_max_discard(struct mmc_card *card)
   1939{
   1940	struct mmc_host *host = card->host;
   1941	unsigned int max_discard, max_trim;
   1942
   1943	/*
   1944	 * Without erase_group_def set, the MMC erase timeout depends on the
   1945	 * clock frequency, which can change.  In that case, the best choice is
   1946	 * just the preferred erase size.
   1947	 */
   1948	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
   1949		return card->pref_erase;
   1950
   1951	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
   1952	if (mmc_can_trim(card)) {
   1953		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
   1954		if (max_trim < max_discard || max_discard == 0)
   1955			max_discard = max_trim;
   1956	} else if (max_discard < card->erase_size) {
   1957		max_discard = 0;
   1958	}
   1959	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
   1960		mmc_hostname(host), max_discard, host->max_busy_timeout ?
   1961		host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
   1962	return max_discard;
   1963}
   1964EXPORT_SYMBOL(mmc_calc_max_discard);
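
       /*
        * A hypothetical usage sketch (not part of this file): the result is
        * typically fed into the request queue limits, e.g.
        *
        *	blk_queue_max_discard_sectors(q, mmc_calc_max_discard(card));
        */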
   1965
   1966bool mmc_card_is_blockaddr(struct mmc_card *card)
   1967{
   1968	return card ? mmc_card_blockaddr(card) : false;
   1969}
   1970EXPORT_SYMBOL(mmc_card_is_blockaddr);
   1971
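       /**
        * mmc_set_blocklen - set the card's block length with CMD16
        * @card: card to configure
        * @blocklen: block length in bytes
        *
        * For block-addressed cards and for the DDR52/HS400/HS400ES modes the
        * 512-byte block length is fixed, so the command is skipped and 0 is
        * returned.
        */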
   1972int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
   1973{
   1974	struct mmc_command cmd = {};
   1975
   1976	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
   1977	    mmc_card_hs400(card) || mmc_card_hs400es(card))
   1978		return 0;
   1979
   1980	cmd.opcode = MMC_SET_BLOCKLEN;
   1981	cmd.arg = blocklen;
   1982	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
   1983	return mmc_wait_for_cmd(card->host, &cmd, 5);
   1984}
   1985EXPORT_SYMBOL(mmc_set_blocklen);
   1986
   1987static void mmc_hw_reset_for_init(struct mmc_host *host)
   1988{
   1989	mmc_pwrseq_reset(host);
   1990
   1991	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->card_hw_reset)
   1992		return;
   1993	host->ops->card_hw_reset(host);
   1994}
   1995
   1996/**
   1997 * mmc_hw_reset - reset the card in hardware
   1998 * @card: card to be reset
   1999 *
   2000 * Hard reset the card. This function is only for upper layers, like the
   2001 * block layer or card drivers. You cannot use it in host drivers (struct
   2002 * mmc_card might be gone then).
   2003 *
   2004 * Return: 0 on success, -errno on failure
   2005 */
   2006int mmc_hw_reset(struct mmc_card *card)
   2007{
   2008	struct mmc_host *host = card->host;
   2009	int ret;
   2010
   2011	ret = host->bus_ops->hw_reset(host);
   2012	if (ret < 0)
   2013		pr_warn("%s: tried to HW reset card, got error %d\n",
   2014			mmc_hostname(host), ret);
   2015
   2016	return ret;
   2017}
   2018EXPORT_SYMBOL(mmc_hw_reset);
   2019
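       /**
        * mmc_sw_reset - reset the card in software
        * @card: card to be reset
        *
        * Soft reset the card. Like mmc_hw_reset(), this is only for upper
        * layers; not all bus ops implement it, in which case -EOPNOTSUPP is
        * returned.
        *
        * Return: 0 on success, -errno on failure
        */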
   2020int mmc_sw_reset(struct mmc_card *card)
   2021{
   2022	struct mmc_host *host = card->host;
   2023	int ret;
   2024
   2025	if (!host->bus_ops->sw_reset)
   2026		return -EOPNOTSUPP;
   2027
   2028	ret = host->bus_ops->sw_reset(host);
   2029	if (ret)
   2030		pr_warn("%s: tried to SW reset card, got error %d\n",
   2031			mmc_hostname(host), ret);
   2032
   2033	return ret;
   2034}
   2035EXPORT_SYMBOL(mmc_sw_reset);
   2036
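       /*
        * mmc_rescan_try_freq - attempt card initialization at a given frequency
        * @host: host to probe
        * @freq: initial clock frequency in Hz
        *
        * Powers up the bus, resets any old card state (hardware reset, CMD52,
        * CMD0) and then tries to attach SDIO, SD and (e)MMC, in that order.
        * Returns 0 on success, -EIO if no card could be initialized.
        */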
   2037static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
   2038{
   2039	host->f_init = freq;
   2040
   2041	pr_debug("%s: %s: trying to init card at %u Hz\n",
   2042		mmc_hostname(host), __func__, host->f_init);
   2043
   2044	mmc_power_up(host, host->ocr_avail);
   2045
   2046	/*
   2047	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
   2048	 * do a hardware reset if possible.
   2049	 */
   2050	mmc_hw_reset_for_init(host);
   2051
   2052	/*
   2053	 * sdio_reset sends CMD52 to reset the card.  Since we do not know
   2054	 * if the card is being re-initialized, just send it.  CMD52
   2055	 * should be ignored by SD/eMMC cards.
   2056	 * Skip it if we already know that we do not support SDIO commands.
   2057	 */
   2058	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
   2059		sdio_reset(host);
   2060
   2061	mmc_go_idle(host);
   2062
   2063	if (!(host->caps2 & MMC_CAP2_NO_SD)) {
   2064		if (mmc_send_if_cond_pcie(host, host->ocr_avail))
   2065			goto out;
   2066		if (mmc_card_sd_express(host))
   2067			return 0;
   2068	}
   2069
   2070	/* Order's important: probe SDIO, then SD, then MMC */
   2071	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
   2072		if (!mmc_attach_sdio(host))
   2073			return 0;
   2074
   2075	if (!(host->caps2 & MMC_CAP2_NO_SD))
   2076		if (!mmc_attach_sd(host))
   2077			return 0;
   2078
   2079	if (!(host->caps2 & MMC_CAP2_NO_MMC))
   2080		if (!mmc_attach_mmc(host))
   2081			return 0;
   2082
   2083out:
   2084	mmc_power_off(host);
   2085	return -EIO;
   2086}
   2087
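       /*
        * _mmc_detect_card_removed - check whether the card is still present
        * @host: host to check
        *
        * Queries the bus ops' alive() handler and marks the card as removed
        * when it no longer responds. Returns 1 if the card was removed, 0
        * otherwise.
        */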
   2088int _mmc_detect_card_removed(struct mmc_host *host)
   2089{
   2090	int ret;
   2091
   2092	if (!host->card || mmc_card_removed(host->card))
   2093		return 1;
   2094
   2095	ret = host->bus_ops->alive(host);
   2096
   2097	/*
   2098	 * Card detect status and alive check may be out of sync if card is
   2099	 * The card detect status and the alive check may be out of sync if the
   2100	 * card is removed slowly, when the card detect switch changes while the
   2101	 * card/slot pads are still contacted in hardware (refer to "SD Card
   2102	 * Mechanical Addendum, Appendix C: Card Detection Switch"). So
   2103	 * reschedule the detect work 200ms later for this case.
   2104	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
   2105		mmc_detect_change(host, msecs_to_jiffies(200));
   2106		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
   2107	}
   2108
   2109	if (ret) {
   2110		mmc_card_set_removed(host->card);
   2111		pr_debug("%s: card remove detected\n", mmc_hostname(host));
   2112	}
   2113
   2114	return ret;
   2115}
   2116
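       /**
        * mmc_detect_card_removed - lazily check whether the card was removed
        * @host: host to check (must be claimed by the caller)
        *
        * Only performs the (potentially slow) alive check when a change has
        * been signalled or when the host relies on polling; otherwise the
        * cached removed state is returned.
        */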
   2117int mmc_detect_card_removed(struct mmc_host *host)
   2118{
   2119	struct mmc_card *card = host->card;
   2120	int ret;
   2121
   2122	WARN_ON(!host->claimed);
   2123
   2124	if (!card)
   2125		return 1;
   2126
   2127	if (!mmc_card_is_removable(host))
   2128		return 0;
   2129
   2130	ret = mmc_card_removed(card);
   2131	/*
   2132	 * The card will be considered unchanged unless we have been asked to
   2133	 * detect a change or host requires polling to provide card detection.
   2134	 */
   2135	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
   2136		return ret;
   2137
   2138	host->detect_change = 0;
   2139	if (!ret) {
   2140		ret = _mmc_detect_card_removed(host);
   2141		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
   2142			/*
   2143			 * Schedule a detect work as soon as possible to let a
   2144			 * rescan handle the card removal.
   2145			 */
   2146			cancel_delayed_work(&host->detect);
   2147			_mmc_detect_change(host, 0, false);
   2148		}
   2149	}
   2150
   2151	return ret;
   2152}
   2153EXPORT_SYMBOL(mmc_detect_card_removed);
   2154
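       /**
        * mmc_card_alternative_gpt_sector - compute Tegra's alternative GPT sector
        * @card: card to inspect
        * @gpt_sector: filled with the sector holding the alternative GPT
        *
        * For example, with BOOT_SIZE_MULT = 8 both boot partitions together
        * occupy 2 * 8 * 128K / 512 = 4096 sectors, and *gpt_sector is set to
        * card->ext_csd.sectors - 4096 - 1.
        */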
   2155int mmc_card_alternative_gpt_sector(struct mmc_card *card, sector_t *gpt_sector)
   2156{
   2157	unsigned int boot_sectors_num;
   2158
   2159	if (!(card->host->caps2 & MMC_CAP2_ALT_GPT_TEGRA))
   2160		return -EOPNOTSUPP;
   2161
   2162	/* filter out unrelated cards */
   2163	if (card->ext_csd.rev < 3 ||
   2164	    !mmc_card_mmc(card) ||
   2165	    !mmc_card_is_blockaddr(card) ||
   2166	     mmc_card_is_removable(card->host))
   2167		return -ENOENT;
   2168
   2169	/*
   2170	 * eMMC storage has two special boot partitions in addition to the
   2171	 * main one.  NVIDIA's bootloader linearizes eMMC boot0->boot1->main
   2172	 * accesses, this means that the partition table addresses are shifted
   2173	 * by the size of boot partitions.  In accordance with the eMMC
   2174	 * specification, the boot partition size is calculated as follows:
   2175	 *
   2176	 *	boot partition size = 128K byte x BOOT_SIZE_MULT
   2177	 *
   2178	 * Calculate the number of sectors occupied by both boot partitions.
   2179	 */
   2180	boot_sectors_num = card->ext_csd.raw_boot_mult * SZ_128K /
   2181			   SZ_512 * MMC_NUM_BOOT_PARTITION;
   2182
   2183	/* Defined by NVIDIA and used by Android devices. */
   2184	*gpt_sector = card->ext_csd.sectors - boot_sectors_num - 1;
   2185
   2186	return 0;
   2187}
   2188EXPORT_SYMBOL(mmc_card_alternative_gpt_sector);
   2189
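       /*
        * mmc_rescan - delayed work that (re)probes the bus for cards
        * @work: the host's detect work
        *
        * Verifies an already registered card, or, when no card is bound,
        * walks the 'freqs' table from the highest to the lowest frequency
        * until initialization succeeds. Reschedules itself every second for
        * hosts that need polling.
        */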
   2190void mmc_rescan(struct work_struct *work)
   2191{
   2192	struct mmc_host *host =
   2193		container_of(work, struct mmc_host, detect.work);
   2194	int i;
   2195
   2196	if (host->rescan_disable)
   2197		return;
   2198
   2199	/* If there is a non-removable card registered, only scan once */
   2200	if (!mmc_card_is_removable(host) && host->rescan_entered)
   2201		return;
   2202	host->rescan_entered = 1;
   2203
   2204	if (host->trigger_card_event && host->ops->card_event) {
   2205		mmc_claim_host(host);
   2206		host->ops->card_event(host);
   2207		mmc_release_host(host);
   2208		host->trigger_card_event = false;
   2209	}
   2210
   2211	/* Verify that a registered card is still functional, else remove it. */
   2212	if (host->bus_ops)
   2213		host->bus_ops->detect(host);
   2214
   2215	host->detect_change = 0;
   2216
   2217	/* If there is still a card present, stop here. */
   2218	if (host->bus_ops)
   2219		goto out;
   2220
   2221	mmc_claim_host(host);
   2222	if (mmc_card_is_removable(host) && host->ops->get_cd &&
   2223			host->ops->get_cd(host) == 0) {
   2224		mmc_power_off(host);
   2225		mmc_release_host(host);
   2226		goto out;
   2227	}
   2228
   2229	/* If an SD express card is present, then leave it as is. */
   2230	if (mmc_card_sd_express(host)) {
   2231		mmc_release_host(host);
   2232		goto out;
   2233	}
   2234
   2235	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
   2236		unsigned int freq = freqs[i];
   2237		if (freq > host->f_max) {
   2238			if (i + 1 < ARRAY_SIZE(freqs))
   2239				continue;
   2240			freq = host->f_max;
   2241		}
   2242		if (!mmc_rescan_try_freq(host, max(freq, host->f_min)))
   2243			break;
   2244		if (freqs[i] <= host->f_min)
   2245			break;
   2246	}
   2247	mmc_release_host(host);
   2248
   2249 out:
   2250	if (host->caps & MMC_CAP_NEEDS_POLL)
   2251		mmc_schedule_delayed_work(&host->detect, HZ);
   2252}
   2253
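       /*
        * mmc_start_host - enable card detection and kick off the first rescan
        * @host: host to start
        *
        * Powers up the bus early (unless MMC_CAP2_NO_PRESCAN_POWERUP is set),
        * requests the card-detect IRQ and schedules the initial detect work.
        */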
   2254void mmc_start_host(struct mmc_host *host)
   2255{
   2256	host->f_init = max(min(freqs[0], host->f_max), host->f_min);
   2257	host->rescan_disable = 0;
   2258
   2259	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
   2260		mmc_claim_host(host);
   2261		mmc_power_up(host, host->ocr_avail);
   2262		mmc_release_host(host);
   2263	}
   2264
   2265	mmc_gpiod_request_cd_irq(host);
   2266	_mmc_detect_change(host, 0, false);
   2267}
   2268
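       /*
        * __mmc_stop_host - stop card detection without tearing down the bus
        * @host: host to stop
        *
        * Disables the card-detect IRQ and the rescan machinery and waits for
        * any pending detect work to finish; mmc_stop_host() builds on this.
        */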
   2269void __mmc_stop_host(struct mmc_host *host)
   2270{
   2271	if (host->slot.cd_irq >= 0) {
   2272		mmc_gpio_set_cd_wake(host, false);
   2273		disable_irq(host->slot.cd_irq);
   2274	}
   2275
   2276	host->rescan_disable = 1;
   2277	cancel_delayed_work_sync(&host->detect);
   2278}
   2279
   2280void mmc_stop_host(struct mmc_host *host)
   2281{
   2282	__mmc_stop_host(host);
   2283
   2284	/* clear pm flags now and let card drivers set them as needed */
   2285	host->pm_flags = 0;
   2286
   2287	if (host->bus_ops) {
   2288		/* Calling bus_ops->remove() with a claimed host can deadlock */
   2289		host->bus_ops->remove(host);
   2290		mmc_claim_host(host);
   2291		mmc_detach_bus(host);
   2292		mmc_power_off(host);
   2293		mmc_release_host(host);
   2294		return;
   2295	}
   2296
   2297	mmc_claim_host(host);
   2298	mmc_power_off(host);
   2299	mmc_release_host(host);
   2300}
   2301
   2302static int __init mmc_init(void)
   2303{
   2304	int ret;
   2305
   2306	ret = mmc_register_bus();
   2307	if (ret)
   2308		return ret;
   2309
   2310	ret = mmc_register_host_class();
   2311	if (ret)
   2312		goto unregister_bus;
   2313
   2314	ret = sdio_register_bus();
   2315	if (ret)
   2316		goto unregister_host_class;
   2317
   2318	return 0;
   2319
   2320unregister_host_class:
   2321	mmc_unregister_host_class();
   2322unregister_bus:
   2323	mmc_unregister_bus();
   2324	return ret;
   2325}
   2326
   2327static void __exit mmc_exit(void)
   2328{
   2329	sdio_unregister_bus();
   2330	mmc_unregister_host_class();
   2331	mmc_unregister_bus();
   2332}
   2333
   2334subsys_initcall(mmc_init);
   2335module_exit(mmc_exit);
   2336
   2337MODULE_LICENSE("GPL");