cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

block.c (78902B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Block driver for media (i.e., flash cards)
      4 *
      5 * Copyright 2002 Hewlett-Packard Company
      6 * Copyright 2005-2008 Pierre Ossman
      7 *
      8 * Use consistent with the GNU GPL is permitted,
      9 * provided that this copyright notice is
     10 * preserved in its entirety in all copies and derived works.
     11 *
     12 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
     13 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
     14 * FITNESS FOR ANY PARTICULAR PURPOSE.
     15 *
     16 * Many thanks to Alessandro Rubini and Jonathan Corbet!
     17 *
     18 * Author:  Andrew Christian
     19 *          28 May 2002
     20 */
     21#include <linux/moduleparam.h>
     22#include <linux/module.h>
     23#include <linux/init.h>
     24
     25#include <linux/kernel.h>
     26#include <linux/fs.h>
     27#include <linux/slab.h>
     28#include <linux/errno.h>
     29#include <linux/hdreg.h>
     30#include <linux/kdev_t.h>
     31#include <linux/kref.h>
     32#include <linux/blkdev.h>
     33#include <linux/cdev.h>
     34#include <linux/mutex.h>
     35#include <linux/scatterlist.h>
     36#include <linux/string_helpers.h>
     37#include <linux/delay.h>
     38#include <linux/capability.h>
     39#include <linux/compat.h>
     40#include <linux/pm_runtime.h>
     41#include <linux/idr.h>
     42#include <linux/debugfs.h>
     43
     44#include <linux/mmc/ioctl.h>
     45#include <linux/mmc/card.h>
     46#include <linux/mmc/host.h>
     47#include <linux/mmc/mmc.h>
     48#include <linux/mmc/sd.h>
     49
     50#include <linux/uaccess.h>
     51
     52#include "queue.h"
     53#include "block.h"
     54#include "core.h"
     55#include "card.h"
     56#include "crypto.h"
     57#include "host.h"
     58#include "bus.h"
     59#include "mmc_ops.h"
     60#include "quirks.h"
     61#include "sd_ops.h"
     62
     63MODULE_ALIAS("mmc:block");
     64#ifdef MODULE_PARAM_PREFIX
     65#undef MODULE_PARAM_PREFIX
     66#endif
     67#define MODULE_PARAM_PREFIX "mmcblk."
     68
     69/*
      70 * Set a 10 second timeout for polling write request busy state. Note that the
      71 * mmc core sets a 3 second timeout for SD cards, and SDHCI has long had a 10
      72 * second software timer to time out the whole request, so 10 seconds should be
     73 * ample.
     74 */
     75#define MMC_BLK_TIMEOUT_MS  (10 * 1000)
     76#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
     77#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
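/*
 * For illustration: these helpers pull the EXT_CSD byte index (bits 23:16)
 * and value (bits 15:8) out of an MMC_SWITCH (CMD6) argument.  For example,
 * the cache-flush argument built in mmc_blk_cqe_issue_flush() below,
 *
 *     arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | (EXT_CSD_FLUSH_CACHE << 16) |
 *           (1 << 8) | EXT_CSD_CMD_SET_NORMAL;
 *
 * decodes as MMC_EXTRACT_INDEX_FROM_ARG(arg) == EXT_CSD_FLUSH_CACHE and
 * MMC_EXTRACT_VALUE_FROM_ARG(arg) == 1.
 */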
     78
     79#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
     80				  (rq_data_dir(req) == WRITE))
     81static DEFINE_MUTEX(block_mutex);
     82
     83/*
      84 * The defaults come from config options but can be overridden by module
     85 * or bootarg options.
     86 */
     87static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
     88
     89/*
      90 * We've only got one major, so the number of mmcblk devices is
     91 * limited to (1 << 20) / number of minors per device.  It is also
     92 * limited by the MAX_DEVICES below.
     93 */
     94static int max_devices;
     95
     96#define MAX_DEVICES 256
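/*
 * Worked example (illustrative): with the common CONFIG_MMC_BLOCK_MINORS=8
 * default, (1 << 20) / 8 = 131072, so the effective bound is MAX_DEVICES
 * (256); max_devices is expected to be set to the smaller of the two limits
 * at module init.
 */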
     97
     98static DEFINE_IDA(mmc_blk_ida);
     99static DEFINE_IDA(mmc_rpmb_ida);
    100
    101struct mmc_blk_busy_data {
    102	struct mmc_card *card;
    103	u32 status;
    104};
    105
    106/*
    107 * There is one mmc_blk_data per slot.
    108 */
    109struct mmc_blk_data {
    110	struct device	*parent;
    111	struct gendisk	*disk;
    112	struct mmc_queue queue;
    113	struct list_head part;
    114	struct list_head rpmbs;
    115
    116	unsigned int	flags;
    117#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
    118#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
    119
    120	struct kref	kref;
    121	unsigned int	read_only;
    122	unsigned int	part_type;
    123	unsigned int	reset_done;
    124#define MMC_BLK_READ		BIT(0)
    125#define MMC_BLK_WRITE		BIT(1)
    126#define MMC_BLK_DISCARD		BIT(2)
    127#define MMC_BLK_SECDISCARD	BIT(3)
    128#define MMC_BLK_CQE_RECOVERY	BIT(4)
    129#define MMC_BLK_TRIM		BIT(5)
    130
    131	/*
     132	 * Only set in the main mmc_blk_data, which is associated
     133	 * with the mmc_card via dev_set_drvdata; keeps track
     134	 * of the currently selected device partition.
    135	 */
    136	unsigned int	part_curr;
    137	int	area_type;
    138
    139	/* debugfs files (only in main mmc_blk_data) */
    140	struct dentry *status_dentry;
    141	struct dentry *ext_csd_dentry;
    142};
    143
    144/* Device type for RPMB character devices */
    145static dev_t mmc_rpmb_devt;
    146
    147/* Bus type for RPMB character devices */
    148static struct bus_type mmc_rpmb_bus_type = {
    149	.name = "mmc_rpmb",
    150};
    151
    152/**
    153 * struct mmc_rpmb_data - special RPMB device type for these areas
    154 * @dev: the device for the RPMB area
    155 * @chrdev: character device for the RPMB area
    156 * @id: unique device ID number
     157 * @part_index: partition index (0 for the first partition)
    158 * @md: parent MMC block device
    159 * @node: list item, so we can put this device on a list
    160 */
    161struct mmc_rpmb_data {
    162	struct device dev;
    163	struct cdev chrdev;
    164	int id;
    165	unsigned int part_index;
    166	struct mmc_blk_data *md;
    167	struct list_head node;
    168};
    169
    170static DEFINE_MUTEX(open_lock);
    171
    172module_param(perdev_minors, int, 0444);
     173MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");
    174
    175static inline int mmc_blk_part_switch(struct mmc_card *card,
    176				      unsigned int part_type);
    177static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
    178			       struct mmc_card *card,
    179			       int disable_multi,
    180			       struct mmc_queue *mq);
    181static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
    182
    183static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
    184{
    185	struct mmc_blk_data *md;
    186
    187	mutex_lock(&open_lock);
    188	md = disk->private_data;
    189	if (md && !kref_get_unless_zero(&md->kref))
    190		md = NULL;
    191	mutex_unlock(&open_lock);
    192
    193	return md;
    194}
    195
    196static inline int mmc_get_devidx(struct gendisk *disk)
    197{
    198	int devidx = disk->first_minor / perdev_minors;
    199	return devidx;
    200}
    201
    202static void mmc_blk_kref_release(struct kref *ref)
    203{
    204	struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref);
    205	int devidx;
    206
    207	devidx = mmc_get_devidx(md->disk);
    208	ida_simple_remove(&mmc_blk_ida, devidx);
    209
    210	mutex_lock(&open_lock);
    211	md->disk->private_data = NULL;
    212	mutex_unlock(&open_lock);
    213
    214	put_disk(md->disk);
    215	kfree(md);
    216}
    217
    218static void mmc_blk_put(struct mmc_blk_data *md)
    219{
    220	kref_put(&md->kref, mmc_blk_kref_release);
    221}
    222
    223static ssize_t power_ro_lock_show(struct device *dev,
    224		struct device_attribute *attr, char *buf)
    225{
    226	int ret;
    227	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
    228	struct mmc_card *card = md->queue.card;
    229	int locked = 0;
    230
    231	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
    232		locked = 2;
    233	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
    234		locked = 1;
    235
    236	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
    237
    238	mmc_blk_put(md);
    239
    240	return ret;
    241}
    242
    243static ssize_t power_ro_lock_store(struct device *dev,
    244		struct device_attribute *attr, const char *buf, size_t count)
    245{
    246	int ret;
    247	struct mmc_blk_data *md, *part_md;
    248	struct mmc_queue *mq;
    249	struct request *req;
    250	unsigned long set;
    251
    252	if (kstrtoul(buf, 0, &set))
    253		return -EINVAL;
    254
    255	if (set != 1)
    256		return count;
    257
    258	md = mmc_blk_get(dev_to_disk(dev));
    259	mq = &md->queue;
    260
    261	/* Dispatch locking to the block layer */
    262	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
    263	if (IS_ERR(req)) {
    264		count = PTR_ERR(req);
    265		goto out_put;
    266	}
    267	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
    268	blk_execute_rq(req, false);
    269	ret = req_to_mmc_queue_req(req)->drv_op_result;
    270	blk_mq_free_request(req);
    271
    272	if (!ret) {
    273		pr_info("%s: Locking boot partition ro until next power on\n",
    274			md->disk->disk_name);
    275		set_disk_ro(md->disk, 1);
    276
    277		list_for_each_entry(part_md, &md->part, part)
    278			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
    279				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
    280				set_disk_ro(part_md->disk, 1);
    281			}
    282	}
    283out_put:
    284	mmc_blk_put(md);
    285	return count;
    286}
    287
    288static DEVICE_ATTR(ro_lock_until_next_power_on, 0,
    289		power_ro_lock_show, power_ro_lock_store);
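/*
 * Usage sketch (illustrative): from user space, writing 1 to this attribute
 * on a boot partition, e.g.
 *
 *     echo 1 > /sys/block/mmcblk0boot0/ro_lock_until_next_power_on
 *
 * issues the MMC_DRV_OP_BOOT_WP request above and marks the boot partitions
 * read-only until the next power cycle.  Reading it back reports 0 (unlocked),
 * 1 (power-on locked) or 2 (permanently locked), per power_ro_lock_show().
 */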
    290
    291static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
    292			     char *buf)
    293{
    294	int ret;
    295	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
    296
    297	ret = snprintf(buf, PAGE_SIZE, "%d\n",
    298		       get_disk_ro(dev_to_disk(dev)) ^
    299		       md->read_only);
    300	mmc_blk_put(md);
    301	return ret;
    302}
    303
    304static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
    305			      const char *buf, size_t count)
    306{
    307	int ret;
    308	char *end;
    309	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
    310	unsigned long set = simple_strtoul(buf, &end, 0);
    311	if (end == buf) {
    312		ret = -EINVAL;
    313		goto out;
    314	}
    315
    316	set_disk_ro(dev_to_disk(dev), set || md->read_only);
    317	ret = count;
    318out:
    319	mmc_blk_put(md);
    320	return ret;
    321}
    322
    323static DEVICE_ATTR(force_ro, 0644, force_ro_show, force_ro_store);
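/*
 * Usage sketch (illustrative): /sys/block/mmcblkX/force_ro reports whether
 * the disk is read-only beyond the card's own write protect (get_disk_ro()
 * XOR md->read_only); writing a non-zero value, e.g.
 *
 *     echo 1 > /sys/block/mmcblk0/force_ro
 *
 * forces the block device read-only without touching the card itself.
 */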
    324
    325static struct attribute *mmc_disk_attrs[] = {
    326	&dev_attr_force_ro.attr,
    327	&dev_attr_ro_lock_until_next_power_on.attr,
    328	NULL,
    329};
    330
    331static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj,
    332		struct attribute *a, int n)
    333{
    334	struct device *dev = kobj_to_dev(kobj);
    335	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
    336	umode_t mode = a->mode;
    337
    338	if (a == &dev_attr_ro_lock_until_next_power_on.attr &&
    339	    (md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
    340	    md->queue.card->ext_csd.boot_ro_lockable) {
    341		mode = S_IRUGO;
    342		if (!(md->queue.card->ext_csd.boot_ro_lock &
    343				EXT_CSD_BOOT_WP_B_PWR_WP_DIS))
    344			mode |= S_IWUSR;
    345	}
    346
    347	mmc_blk_put(md);
    348	return mode;
    349}
    350
    351static const struct attribute_group mmc_disk_attr_group = {
    352	.is_visible	= mmc_disk_attrs_is_visible,
    353	.attrs		= mmc_disk_attrs,
    354};
    355
    356static const struct attribute_group *mmc_disk_attr_groups[] = {
    357	&mmc_disk_attr_group,
    358	NULL,
    359};
    360
    361static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
    362{
    363	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
    364	int ret = -ENXIO;
    365
    366	mutex_lock(&block_mutex);
    367	if (md) {
    368		ret = 0;
    369		if ((mode & FMODE_WRITE) && md->read_only) {
    370			mmc_blk_put(md);
    371			ret = -EROFS;
    372		}
    373	}
    374	mutex_unlock(&block_mutex);
    375
    376	return ret;
    377}
    378
    379static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
    380{
    381	struct mmc_blk_data *md = disk->private_data;
    382
    383	mutex_lock(&block_mutex);
    384	mmc_blk_put(md);
    385	mutex_unlock(&block_mutex);
    386}
    387
    388static int
    389mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
    390{
    391	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
    392	geo->heads = 4;
    393	geo->sectors = 16;
    394	return 0;
    395}
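/*
 * Illustrative arithmetic: the geometry is faked as 4 heads x 16 sectors,
 * so a card with, say, 30535680 512-byte sectors reports
 * 30535680 / (4 * 16) = 477120 cylinders.  Tools that still ask for CHS
 * geometry (e.g. via HDIO_GETGEO) get consistent, if meaningless, values.
 */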
    396
    397struct mmc_blk_ioc_data {
    398	struct mmc_ioc_cmd ic;
    399	unsigned char *buf;
    400	u64 buf_bytes;
    401	struct mmc_rpmb_data *rpmb;
    402};
    403
    404static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
    405	struct mmc_ioc_cmd __user *user)
    406{
    407	struct mmc_blk_ioc_data *idata;
    408	int err;
    409
    410	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
    411	if (!idata) {
    412		err = -ENOMEM;
    413		goto out;
    414	}
    415
    416	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
    417		err = -EFAULT;
    418		goto idata_err;
    419	}
    420
    421	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
    422	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
    423		err = -EOVERFLOW;
    424		goto idata_err;
    425	}
    426
    427	if (!idata->buf_bytes) {
    428		idata->buf = NULL;
    429		return idata;
    430	}
    431
    432	idata->buf = memdup_user((void __user *)(unsigned long)
    433				 idata->ic.data_ptr, idata->buf_bytes);
    434	if (IS_ERR(idata->buf)) {
    435		err = PTR_ERR(idata->buf);
    436		goto idata_err;
    437	}
    438
    439	return idata;
    440
    441idata_err:
    442	kfree(idata);
    443out:
    444	return ERR_PTR(err);
    445}
    446
    447static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
    448				      struct mmc_blk_ioc_data *idata)
    449{
    450	struct mmc_ioc_cmd *ic = &idata->ic;
    451
    452	if (copy_to_user(&(ic_ptr->response), ic->response,
    453			 sizeof(ic->response)))
    454		return -EFAULT;
    455
    456	if (!idata->ic.write_flag) {
    457		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
    458				 idata->buf, idata->buf_bytes))
    459			return -EFAULT;
    460	}
    461
    462	return 0;
    463}
    464
    465static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
    466			       struct mmc_blk_ioc_data *idata)
    467{
    468	struct mmc_command cmd = {}, sbc = {};
    469	struct mmc_data data = {};
    470	struct mmc_request mrq = {};
    471	struct scatterlist sg;
    472	int err;
    473	unsigned int target_part;
    474
    475	if (!card || !md || !idata)
    476		return -EINVAL;
    477
    478	/*
     479	 * RPMB accesses come in from the character device, so we
     480	 * need to target these explicitly. Otherwise we just target the
     481	 * partition type of the block device the ioctl() was issued
     482	 * on.
    483	 */
    484	if (idata->rpmb) {
    485		/* Support multiple RPMB partitions */
    486		target_part = idata->rpmb->part_index;
    487		target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
    488	} else {
    489		target_part = md->part_type;
    490	}
    491
    492	cmd.opcode = idata->ic.opcode;
    493	cmd.arg = idata->ic.arg;
    494	cmd.flags = idata->ic.flags;
    495
    496	if (idata->buf_bytes) {
    497		data.sg = &sg;
    498		data.sg_len = 1;
    499		data.blksz = idata->ic.blksz;
    500		data.blocks = idata->ic.blocks;
    501
    502		sg_init_one(data.sg, idata->buf, idata->buf_bytes);
    503
    504		if (idata->ic.write_flag)
    505			data.flags = MMC_DATA_WRITE;
    506		else
    507			data.flags = MMC_DATA_READ;
    508
    509		/* data.flags must already be set before doing this. */
    510		mmc_set_data_timeout(&data, card);
    511
    512		/* Allow overriding the timeout_ns for empirical tuning. */
    513		if (idata->ic.data_timeout_ns)
    514			data.timeout_ns = idata->ic.data_timeout_ns;
    515
    516		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
    517			/*
    518			 * Pretend this is a data transfer and rely on the
    519			 * host driver to compute timeout.  When all host
    520			 * drivers support cmd.cmd_timeout for R1B, this
    521			 * can be changed to:
    522			 *
    523			 *     mrq.data = NULL;
    524			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
    525			 */
    526			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
    527		}
    528
    529		mrq.data = &data;
    530	}
    531
    532	mrq.cmd = &cmd;
    533
    534	err = mmc_blk_part_switch(card, target_part);
    535	if (err)
    536		return err;
    537
    538	if (idata->ic.is_acmd) {
    539		err = mmc_app_cmd(card->host, card);
    540		if (err)
    541			return err;
    542	}
    543
    544	if (idata->rpmb) {
    545		sbc.opcode = MMC_SET_BLOCK_COUNT;
    546		/*
     547		 * We don't do any block count validation because the max size
    548		 * may be increased by a future standard. We just copy the
    549		 * 'Reliable Write' bit here.
    550		 */
    551		sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
    552		sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
    553		mrq.sbc = &sbc;
    554	}
    555
    556	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
    557	    (cmd.opcode == MMC_SWITCH))
    558		return mmc_sanitize(card, idata->ic.cmd_timeout_ms);
    559
    560	mmc_wait_for_req(card->host, &mrq);
    561	memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
    562
    563	if (cmd.error) {
    564		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
    565						__func__, cmd.error);
    566		return cmd.error;
    567	}
    568	if (data.error) {
    569		dev_err(mmc_dev(card->host), "%s: data error %d\n",
    570						__func__, data.error);
    571		return data.error;
    572	}
    573
    574	/*
    575	 * Make sure the cache of the PARTITION_CONFIG register and
    576	 * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write
    577	 * changed it successfully.
    578	 */
    579	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) &&
    580	    (cmd.opcode == MMC_SWITCH)) {
    581		struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
    582		u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg);
    583
    584		/*
    585		 * Update cache so the next mmc_blk_part_switch call operates
    586		 * on up-to-date data.
    587		 */
    588		card->ext_csd.part_config = value;
    589		main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
    590	}
    591
    592	/*
    593	 * Make sure to update CACHE_CTRL in case it was changed. The cache
    594	 * will get turned back on if the card is re-initialized, e.g.
    595	 * suspend/resume or hw reset in recovery.
    596	 */
    597	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
    598	    (cmd.opcode == MMC_SWITCH)) {
    599		u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;
    600
    601		card->ext_csd.cache_ctrl = value;
    602	}
    603
    604	/*
    605	 * According to the SD specs, some commands require a delay after
    606	 * issuing the command.
    607	 */
    608	if (idata->ic.postsleep_min_us)
    609		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
    610
    611	if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
    612		/*
     613		 * Ensure the RPMB/R1B command has completed by polling CMD13 "Send Status".
     614		 * Here we allow overriding the default timeout value if a custom timeout is specified.
    615		 */
    616		err = mmc_poll_for_busy(card, idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS,
    617					false, MMC_BUSY_IO);
    618	}
    619
    620	return err;
    621}
    622
    623static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
    624			     struct mmc_ioc_cmd __user *ic_ptr,
    625			     struct mmc_rpmb_data *rpmb)
    626{
    627	struct mmc_blk_ioc_data *idata;
    628	struct mmc_blk_ioc_data *idatas[1];
    629	struct mmc_queue *mq;
    630	struct mmc_card *card;
    631	int err = 0, ioc_err = 0;
    632	struct request *req;
    633
    634	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
    635	if (IS_ERR(idata))
    636		return PTR_ERR(idata);
     637	/* This will be NULL for non-RPMB ioctl()s */
    638	idata->rpmb = rpmb;
    639
    640	card = md->queue.card;
    641	if (IS_ERR(card)) {
    642		err = PTR_ERR(card);
    643		goto cmd_done;
    644	}
    645
    646	/*
    647	 * Dispatch the ioctl() into the block request queue.
    648	 */
    649	mq = &md->queue;
    650	req = blk_mq_alloc_request(mq->queue,
    651		idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
    652	if (IS_ERR(req)) {
    653		err = PTR_ERR(req);
    654		goto cmd_done;
    655	}
    656	idatas[0] = idata;
    657	req_to_mmc_queue_req(req)->drv_op =
    658		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
    659	req_to_mmc_queue_req(req)->drv_op_data = idatas;
    660	req_to_mmc_queue_req(req)->ioc_count = 1;
    661	blk_execute_rq(req, false);
    662	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
    663	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
    664	blk_mq_free_request(req);
    665
    666cmd_done:
    667	kfree(idata->buf);
    668	kfree(idata);
    669	return ioc_err ? ioc_err : err;
    670}
    671
    672static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
    673				   struct mmc_ioc_multi_cmd __user *user,
    674				   struct mmc_rpmb_data *rpmb)
    675{
    676	struct mmc_blk_ioc_data **idata = NULL;
    677	struct mmc_ioc_cmd __user *cmds = user->cmds;
    678	struct mmc_card *card;
    679	struct mmc_queue *mq;
    680	int err = 0, ioc_err = 0;
    681	__u64 num_of_cmds;
    682	unsigned int i, n;
    683	struct request *req;
    684
    685	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
    686			   sizeof(num_of_cmds)))
    687		return -EFAULT;
    688
    689	if (!num_of_cmds)
    690		return 0;
    691
    692	if (num_of_cmds > MMC_IOC_MAX_CMDS)
    693		return -EINVAL;
    694
    695	n = num_of_cmds;
    696	idata = kcalloc(n, sizeof(*idata), GFP_KERNEL);
    697	if (!idata)
    698		return -ENOMEM;
    699
    700	for (i = 0; i < n; i++) {
    701		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
    702		if (IS_ERR(idata[i])) {
    703			err = PTR_ERR(idata[i]);
    704			n = i;
    705			goto cmd_err;
    706		}
    707		/* This will be NULL on non-RPMB ioctl():s */
    708		idata[i]->rpmb = rpmb;
    709	}
    710
    711	card = md->queue.card;
    712	if (IS_ERR(card)) {
    713		err = PTR_ERR(card);
    714		goto cmd_err;
    715	}
    716
    717
    718	/*
    719	 * Dispatch the ioctl()s into the block request queue.
    720	 */
    721	mq = &md->queue;
    722	req = blk_mq_alloc_request(mq->queue,
    723		idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
    724	if (IS_ERR(req)) {
    725		err = PTR_ERR(req);
    726		goto cmd_err;
    727	}
    728	req_to_mmc_queue_req(req)->drv_op =
    729		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
    730	req_to_mmc_queue_req(req)->drv_op_data = idata;
    731	req_to_mmc_queue_req(req)->ioc_count = n;
    732	blk_execute_rq(req, false);
    733	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
    734
     735	/* Copy each response, and any read data, back to user space */
    736	for (i = 0; i < n && !err; i++)
    737		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
    738
    739	blk_mq_free_request(req);
    740
    741cmd_err:
    742	for (i = 0; i < n; i++) {
    743		kfree(idata[i]->buf);
    744		kfree(idata[i]);
    745	}
    746	kfree(idata);
    747	return ioc_err ? ioc_err : err;
    748}
    749
    750static int mmc_blk_check_blkdev(struct block_device *bdev)
    751{
    752	/*
    753	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
    754	 * whole block device, not on a partition.  This prevents overspray
    755	 * between sibling partitions.
    756	 */
    757	if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev))
    758		return -EPERM;
    759	return 0;
    760}
    761
    762static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
    763	unsigned int cmd, unsigned long arg)
    764{
    765	struct mmc_blk_data *md;
    766	int ret;
    767
    768	switch (cmd) {
    769	case MMC_IOC_CMD:
    770		ret = mmc_blk_check_blkdev(bdev);
    771		if (ret)
    772			return ret;
    773		md = mmc_blk_get(bdev->bd_disk);
    774		if (!md)
    775			return -EINVAL;
    776		ret = mmc_blk_ioctl_cmd(md,
    777					(struct mmc_ioc_cmd __user *)arg,
    778					NULL);
    779		mmc_blk_put(md);
    780		return ret;
    781	case MMC_IOC_MULTI_CMD:
    782		ret = mmc_blk_check_blkdev(bdev);
    783		if (ret)
    784			return ret;
    785		md = mmc_blk_get(bdev->bd_disk);
    786		if (!md)
    787			return -EINVAL;
    788		ret = mmc_blk_ioctl_multi_cmd(md,
    789					(struct mmc_ioc_multi_cmd __user *)arg,
    790					NULL);
    791		mmc_blk_put(md);
    792		return ret;
    793	default:
    794		return -EINVAL;
    795	}
    796}
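/*
 * Illustrative user-space sketch (not from this driver): issuing a raw
 * command through MMC_IOC_CMD, here CMD8 (SEND_EXT_CSD) on an eMMC.  The
 * ioctl must be made on the whole device with CAP_SYS_RAWIO, per
 * mmc_blk_check_blkdev() above.  The response/command flag values are not
 * exported to user space, so they are copied from the kernel headers here,
 * as mmc-utils does.
 *
 *     #include <fcntl.h>
 *     #include <stdio.h>
 *     #include <sys/ioctl.h>
 *     #include <linux/mmc/ioctl.h>
 *
 *     #define MMC_SEND_EXT_CSD 8
 *     #define MMC_RSP_PRESENT (1 << 0)
 *     #define MMC_RSP_CRC     (1 << 2)
 *     #define MMC_RSP_OPCODE  (1 << 4)
 *     #define MMC_CMD_ADTC    (1 << 5)
 *     #define MMC_RSP_R1      (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
 *
 *     int main(void)
 *     {
 *             __u8 ext_csd[512];
 *             struct mmc_ioc_cmd ic = {
 *                     .opcode = MMC_SEND_EXT_CSD,
 *                     .flags  = MMC_RSP_R1 | MMC_CMD_ADTC,
 *                     .blksz  = 512,
 *                     .blocks = 1,
 *             };
 *             int fd = open("/dev/mmcblk0", O_RDWR);
 *
 *             mmc_ioc_cmd_set_data(ic, ext_csd);
 *             if (fd < 0 || ioctl(fd, MMC_IOC_CMD, &ic) < 0) {
 *                     perror("MMC_IOC_CMD");
 *                     return 1;
 *             }
 *             printf("EXT_CSD rev: %u\n", ext_csd[192]);
 *             return 0;
 *     }
 */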
    797
    798#ifdef CONFIG_COMPAT
    799static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
    800	unsigned int cmd, unsigned long arg)
    801{
    802	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
    803}
    804#endif
    805
    806static int mmc_blk_alternative_gpt_sector(struct gendisk *disk,
    807					  sector_t *sector)
    808{
    809	struct mmc_blk_data *md;
    810	int ret;
    811
    812	md = mmc_blk_get(disk);
    813	if (!md)
    814		return -EINVAL;
    815
    816	if (md->queue.card)
    817		ret = mmc_card_alternative_gpt_sector(md->queue.card, sector);
    818	else
    819		ret = -ENODEV;
    820
    821	mmc_blk_put(md);
    822
    823	return ret;
    824}
    825
    826static const struct block_device_operations mmc_bdops = {
    827	.open			= mmc_blk_open,
    828	.release		= mmc_blk_release,
    829	.getgeo			= mmc_blk_getgeo,
    830	.owner			= THIS_MODULE,
    831	.ioctl			= mmc_blk_ioctl,
    832#ifdef CONFIG_COMPAT
    833	.compat_ioctl		= mmc_blk_compat_ioctl,
    834#endif
    835	.alternative_gpt_sector	= mmc_blk_alternative_gpt_sector,
    836};
    837
    838static int mmc_blk_part_switch_pre(struct mmc_card *card,
    839				   unsigned int part_type)
    840{
    841	int ret = 0;
    842
    843	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
    844		if (card->ext_csd.cmdq_en) {
    845			ret = mmc_cmdq_disable(card);
    846			if (ret)
    847				return ret;
    848		}
    849		mmc_retune_pause(card->host);
    850	}
    851
    852	return ret;
    853}
    854
    855static int mmc_blk_part_switch_post(struct mmc_card *card,
    856				    unsigned int part_type)
    857{
    858	int ret = 0;
    859
    860	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
    861		mmc_retune_unpause(card->host);
    862		if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
    863			ret = mmc_cmdq_enable(card);
    864	}
    865
    866	return ret;
    867}
    868
    869static inline int mmc_blk_part_switch(struct mmc_card *card,
    870				      unsigned int part_type)
    871{
    872	int ret = 0;
    873	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
    874
    875	if (main_md->part_curr == part_type)
    876		return 0;
    877
    878	if (mmc_card_mmc(card)) {
    879		u8 part_config = card->ext_csd.part_config;
    880
    881		ret = mmc_blk_part_switch_pre(card, part_type);
    882		if (ret)
    883			return ret;
    884
    885		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
    886		part_config |= part_type;
    887
    888		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
    889				 EXT_CSD_PART_CONFIG, part_config,
    890				 card->ext_csd.part_time);
    891		if (ret) {
    892			mmc_blk_part_switch_post(card, part_type);
    893			return ret;
    894		}
    895
    896		card->ext_csd.part_config = part_config;
    897
    898		ret = mmc_blk_part_switch_post(card, main_md->part_curr);
    899	}
    900
    901	main_md->part_curr = part_type;
    902	return ret;
    903}
    904
    905static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
    906{
    907	int err;
    908	u32 result;
    909	__be32 *blocks;
    910
    911	struct mmc_request mrq = {};
    912	struct mmc_command cmd = {};
    913	struct mmc_data data = {};
    914
    915	struct scatterlist sg;
    916
    917	cmd.opcode = MMC_APP_CMD;
    918	cmd.arg = card->rca << 16;
    919	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
    920
    921	err = mmc_wait_for_cmd(card->host, &cmd, 0);
    922	if (err)
    923		return err;
    924	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
    925		return -EIO;
    926
    927	memset(&cmd, 0, sizeof(struct mmc_command));
    928
    929	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
    930	cmd.arg = 0;
    931	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
    932
    933	data.blksz = 4;
    934	data.blocks = 1;
    935	data.flags = MMC_DATA_READ;
    936	data.sg = &sg;
    937	data.sg_len = 1;
    938	mmc_set_data_timeout(&data, card);
    939
    940	mrq.cmd = &cmd;
    941	mrq.data = &data;
    942
    943	blocks = kmalloc(4, GFP_KERNEL);
    944	if (!blocks)
    945		return -ENOMEM;
    946
    947	sg_init_one(&sg, blocks, 4);
    948
    949	mmc_wait_for_req(card->host, &mrq);
    950
    951	result = ntohl(*blocks);
    952	kfree(blocks);
    953
    954	if (cmd.error || data.error)
    955		return -EIO;
    956
    957	*written_blocks = result;
    958
    959	return 0;
    960}
    961
    962static unsigned int mmc_blk_clock_khz(struct mmc_host *host)
    963{
    964	if (host->actual_clock)
    965		return host->actual_clock / 1000;
    966
    967	/* Clock may be subject to a divisor, fudge it by a factor of 2. */
    968	if (host->ios.clock)
    969		return host->ios.clock / 2000;
    970
     971	/* How can there be no clock? */
    972	WARN_ON_ONCE(1);
    973	return 100; /* 100 kHz is minimum possible value */
    974}
    975
    976static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
    977					    struct mmc_data *data)
    978{
    979	unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000);
    980	unsigned int khz;
    981
    982	if (data->timeout_clks) {
    983		khz = mmc_blk_clock_khz(host);
    984		ms += DIV_ROUND_UP(data->timeout_clks, khz);
    985	}
    986
    987	return ms;
    988}
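/*
 * Worked example (illustrative): with data->timeout_ns = 100000000 (100 ms)
 * and data->timeout_clks = 10000 on a 50 MHz bus (50000 kHz), the result is
 * 100 + DIV_ROUND_UP(10000, 50000) = 101 ms.
 */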
    989
    990static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
    991			 int type)
    992{
    993	int err;
    994
    995	if (md->reset_done & type)
    996		return -EEXIST;
    997
    998	md->reset_done |= type;
    999	err = mmc_hw_reset(host->card);
   1000	/* Ensure we switch back to the correct partition */
   1001	if (err) {
   1002		struct mmc_blk_data *main_md =
   1003			dev_get_drvdata(&host->card->dev);
   1004		int part_err;
   1005
   1006		main_md->part_curr = main_md->part_type;
   1007		part_err = mmc_blk_part_switch(host->card, md->part_type);
   1008		if (part_err) {
   1009			/*
   1010			 * We have failed to get back into the correct
   1011			 * partition, so we need to abort the whole request.
   1012			 */
   1013			return -ENODEV;
   1014		}
   1015	}
   1016	return err;
   1017}
   1018
   1019static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
   1020{
   1021	md->reset_done &= ~type;
   1022}
   1023
   1024/*
    1025 * Non-block commands come back from the block layer after it has queued and
    1026 * processed them with all other requests, and then they get issued in this
   1027 * function.
   1028 */
   1029static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
   1030{
   1031	struct mmc_queue_req *mq_rq;
   1032	struct mmc_card *card = mq->card;
   1033	struct mmc_blk_data *md = mq->blkdata;
   1034	struct mmc_blk_ioc_data **idata;
   1035	bool rpmb_ioctl;
   1036	u8 **ext_csd;
   1037	u32 status;
   1038	int ret;
   1039	int i;
   1040
   1041	mq_rq = req_to_mmc_queue_req(req);
   1042	rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);
   1043
   1044	switch (mq_rq->drv_op) {
   1045	case MMC_DRV_OP_IOCTL:
   1046		if (card->ext_csd.cmdq_en) {
   1047			ret = mmc_cmdq_disable(card);
   1048			if (ret)
   1049				break;
   1050		}
   1051		fallthrough;
   1052	case MMC_DRV_OP_IOCTL_RPMB:
   1053		idata = mq_rq->drv_op_data;
   1054		for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
   1055			ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
   1056			if (ret)
   1057				break;
   1058		}
   1059		/* Always switch back to main area after RPMB access */
   1060		if (rpmb_ioctl)
   1061			mmc_blk_part_switch(card, 0);
   1062		else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
   1063			mmc_cmdq_enable(card);
   1064		break;
   1065	case MMC_DRV_OP_BOOT_WP:
   1066		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
   1067				 card->ext_csd.boot_ro_lock |
   1068				 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
   1069				 card->ext_csd.part_time);
   1070		if (ret)
   1071			pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
   1072			       md->disk->disk_name, ret);
   1073		else
   1074			card->ext_csd.boot_ro_lock |=
   1075				EXT_CSD_BOOT_WP_B_PWR_WP_EN;
   1076		break;
   1077	case MMC_DRV_OP_GET_CARD_STATUS:
   1078		ret = mmc_send_status(card, &status);
   1079		if (!ret)
   1080			ret = status;
   1081		break;
   1082	case MMC_DRV_OP_GET_EXT_CSD:
   1083		ext_csd = mq_rq->drv_op_data;
   1084		ret = mmc_get_ext_csd(card, ext_csd);
   1085		break;
   1086	default:
   1087		pr_err("%s: unknown driver specific operation\n",
   1088		       md->disk->disk_name);
   1089		ret = -EINVAL;
   1090		break;
   1091	}
   1092	mq_rq->drv_op_result = ret;
   1093	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
   1094}
   1095
   1096static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req,
   1097				   int type, unsigned int erase_arg)
   1098{
   1099	struct mmc_blk_data *md = mq->blkdata;
   1100	struct mmc_card *card = md->queue.card;
   1101	unsigned int from, nr;
   1102	int err = 0;
   1103	blk_status_t status = BLK_STS_OK;
   1104
   1105	if (!mmc_can_erase(card)) {
   1106		status = BLK_STS_NOTSUPP;
   1107		goto fail;
   1108	}
   1109
   1110	from = blk_rq_pos(req);
   1111	nr = blk_rq_sectors(req);
   1112
   1113	do {
   1114		err = 0;
   1115		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
   1116			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
   1117					 INAND_CMD38_ARG_EXT_CSD,
   1118					 erase_arg == MMC_TRIM_ARG ?
   1119					 INAND_CMD38_ARG_TRIM :
   1120					 INAND_CMD38_ARG_ERASE,
   1121					 card->ext_csd.generic_cmd6_time);
   1122		}
   1123		if (!err)
   1124			err = mmc_erase(card, from, nr, erase_arg);
   1125	} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
   1126	if (err)
   1127		status = BLK_STS_IOERR;
   1128	else
   1129		mmc_blk_reset_success(md, type);
   1130fail:
   1131	blk_mq_end_request(req, status);
   1132}
   1133
   1134static void mmc_blk_issue_trim_rq(struct mmc_queue *mq, struct request *req)
   1135{
   1136	mmc_blk_issue_erase_rq(mq, req, MMC_BLK_TRIM, MMC_TRIM_ARG);
   1137}
   1138
   1139static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
   1140{
   1141	struct mmc_blk_data *md = mq->blkdata;
   1142	struct mmc_card *card = md->queue.card;
   1143
   1144	mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, card->erase_arg);
   1145}
   1146
   1147static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
   1148				       struct request *req)
   1149{
   1150	struct mmc_blk_data *md = mq->blkdata;
   1151	struct mmc_card *card = md->queue.card;
   1152	unsigned int from, nr, arg;
   1153	int err = 0, type = MMC_BLK_SECDISCARD;
   1154	blk_status_t status = BLK_STS_OK;
   1155
   1156	if (!(mmc_can_secure_erase_trim(card))) {
   1157		status = BLK_STS_NOTSUPP;
   1158		goto out;
   1159	}
   1160
   1161	from = blk_rq_pos(req);
   1162	nr = blk_rq_sectors(req);
   1163
   1164	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
   1165		arg = MMC_SECURE_TRIM1_ARG;
   1166	else
   1167		arg = MMC_SECURE_ERASE_ARG;
   1168
   1169retry:
   1170	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
   1171		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
   1172				 INAND_CMD38_ARG_EXT_CSD,
   1173				 arg == MMC_SECURE_TRIM1_ARG ?
   1174				 INAND_CMD38_ARG_SECTRIM1 :
   1175				 INAND_CMD38_ARG_SECERASE,
   1176				 card->ext_csd.generic_cmd6_time);
   1177		if (err)
   1178			goto out_retry;
   1179	}
   1180
   1181	err = mmc_erase(card, from, nr, arg);
   1182	if (err == -EIO)
   1183		goto out_retry;
   1184	if (err) {
   1185		status = BLK_STS_IOERR;
   1186		goto out;
   1187	}
   1188
   1189	if (arg == MMC_SECURE_TRIM1_ARG) {
   1190		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
   1191			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
   1192					 INAND_CMD38_ARG_EXT_CSD,
   1193					 INAND_CMD38_ARG_SECTRIM2,
   1194					 card->ext_csd.generic_cmd6_time);
   1195			if (err)
   1196				goto out_retry;
   1197		}
   1198
   1199		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
   1200		if (err == -EIO)
   1201			goto out_retry;
   1202		if (err) {
   1203			status = BLK_STS_IOERR;
   1204			goto out;
   1205		}
   1206	}
   1207
   1208out_retry:
   1209	if (err && !mmc_blk_reset(md, card->host, type))
   1210		goto retry;
   1211	if (!err)
   1212		mmc_blk_reset_success(md, type);
   1213out:
   1214	blk_mq_end_request(req, status);
   1215}
   1216
   1217static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
   1218{
   1219	struct mmc_blk_data *md = mq->blkdata;
   1220	struct mmc_card *card = md->queue.card;
   1221	int ret = 0;
   1222
   1223	ret = mmc_flush_cache(card->host);
   1224	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
   1225}
   1226
   1227/*
   1228 * Reformat current write as a reliable write, supporting
   1229 * both legacy and the enhanced reliable write MMC cards.
   1230 * In each transfer we'll handle only as much as a single
   1231 * reliable write can handle, thus finish the request in
   1232 * partial completions.
   1233 */
   1234static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
   1235				    struct mmc_card *card,
   1236				    struct request *req)
   1237{
   1238	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
   1239		/* Legacy mode imposes restrictions on transfers. */
   1240		if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
   1241			brq->data.blocks = 1;
   1242
   1243		if (brq->data.blocks > card->ext_csd.rel_sectors)
   1244			brq->data.blocks = card->ext_csd.rel_sectors;
   1245		else if (brq->data.blocks < card->ext_csd.rel_sectors)
   1246			brq->data.blocks = 1;
   1247	}
   1248}
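/*
 * Worked example (illustrative), assuming a legacy card with
 * rel_sectors = 8 and no EXT_CSD_WR_REL_PARAM_EN: an aligned 13-block FUA
 * write is clamped to 8 blocks; the remaining 5 blocks (fewer than
 * rel_sectors) then go out one block per transfer, which is why the request
 * finishes in partial completions as described above.
 */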
   1249
   1250#define CMD_ERRORS_EXCL_OOR						\
   1251	(R1_ADDRESS_ERROR |	/* Misaligned address */		\
   1252	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
   1253	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
   1254	 R1_CARD_ECC_FAILED |	/* Card ECC failed */			\
   1255	 R1_CC_ERROR |		/* Card controller error */		\
   1256	 R1_ERROR)		/* General/unknown error */
   1257
   1258#define CMD_ERRORS							\
   1259	(CMD_ERRORS_EXCL_OOR |						\
   1260	 R1_OUT_OF_RANGE)	/* Command argument out of range */	\
   1261
   1262static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
   1263{
   1264	u32 val;
   1265
   1266	/*
    1267 * Per the SD specification (physical layer version 4.10) [1],
   1268	 * section 4.3.3, it explicitly states that "When the last
   1269	 * block of user area is read using CMD18, the host should
   1270	 * ignore OUT_OF_RANGE error that may occur even the sequence
   1271	 * is correct". And JESD84-B51 for eMMC also has a similar
   1272	 * statement on section 6.8.3.
   1273	 *
    1274 * Multiple block read/write could be done by either the predefined
    1275 * method, namely CMD23, or open-ended mode. For open-ended mode,
   1276	 * we should ignore the OUT_OF_RANGE error as it's normal behaviour.
   1277	 *
    1278 * However the spec [1] doesn't tell us whether we should also
    1279 * ignore that for the predefined method. But per the spec [1], section
    1280 * 4.15 Set Block Count Command, it says "If illegal block count
    1281 * is set, out of range error will be indicated during read/write
    1282 * operation (For example, data transfer is stopped at user area
    1283 * boundary)." In other words, we can expect an out-of-range error
    1284 * in the response to the following CMD18/25. And if the argument of
    1285 * CMD23 plus the argument of CMD18/25 exceeds the max number of blocks,
    1286 * we can also expect a -ETIMEDOUT or some other error from
    1287 * the host drivers, due to the missing data response (for writes) or
    1288 * data (for reads), as the card will stop the data transfer by itself per
    1289 * the spec. So we only need to check R1_OUT_OF_RANGE for open-ended mode.
   1290	 */
   1291
   1292	if (!brq->stop.error) {
   1293		bool oor_with_open_end;
   1294		/* If there is no error yet, check R1 response */
   1295
   1296		val = brq->stop.resp[0] & CMD_ERRORS;
   1297		oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;
   1298
   1299		if (val && !oor_with_open_end)
   1300			brq->stop.error = -EIO;
   1301	}
   1302}
   1303
   1304static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
   1305			      int disable_multi, bool *do_rel_wr_p,
   1306			      bool *do_data_tag_p)
   1307{
   1308	struct mmc_blk_data *md = mq->blkdata;
   1309	struct mmc_card *card = md->queue.card;
   1310	struct mmc_blk_request *brq = &mqrq->brq;
   1311	struct request *req = mmc_queue_req_to_req(mqrq);
   1312	bool do_rel_wr, do_data_tag;
   1313
   1314	/*
   1315	 * Reliable writes are used to implement Forced Unit Access and
   1316	 * are supported only on MMCs.
   1317	 */
   1318	do_rel_wr = (req->cmd_flags & REQ_FUA) &&
   1319		    rq_data_dir(req) == WRITE &&
   1320		    (md->flags & MMC_BLK_REL_WR);
   1321
   1322	memset(brq, 0, sizeof(struct mmc_blk_request));
   1323
   1324	mmc_crypto_prepare_req(mqrq);
   1325
   1326	brq->mrq.data = &brq->data;
   1327	brq->mrq.tag = req->tag;
   1328
   1329	brq->stop.opcode = MMC_STOP_TRANSMISSION;
   1330	brq->stop.arg = 0;
   1331
   1332	if (rq_data_dir(req) == READ) {
   1333		brq->data.flags = MMC_DATA_READ;
   1334		brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
   1335	} else {
   1336		brq->data.flags = MMC_DATA_WRITE;
   1337		brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
   1338	}
   1339
   1340	brq->data.blksz = 512;
   1341	brq->data.blocks = blk_rq_sectors(req);
   1342	brq->data.blk_addr = blk_rq_pos(req);
   1343
   1344	/*
   1345	 * The command queue supports 2 priorities: "high" (1) and "simple" (0).
   1346	 * The eMMC will give "high" priority tasks priority over "simple"
   1347	 * priority tasks. Here we always set "simple" priority by not setting
   1348	 * MMC_DATA_PRIO.
   1349	 */
   1350
   1351	/*
   1352	 * The block layer doesn't support all sector count
    1353	 * restrictions, so we need to be prepared for requests
    1354	 * that are too big.
   1355	 */
   1356	if (brq->data.blocks > card->host->max_blk_count)
   1357		brq->data.blocks = card->host->max_blk_count;
   1358
   1359	if (brq->data.blocks > 1) {
   1360		/*
   1361		 * Some SD cards in SPI mode return a CRC error or even lock up
   1362		 * completely when trying to read the last block using a
   1363		 * multiblock read command.
   1364		 */
   1365		if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
   1366		    (blk_rq_pos(req) + blk_rq_sectors(req) ==
   1367		     get_capacity(md->disk)))
   1368			brq->data.blocks--;
   1369
   1370		/*
   1371		 * After a read error, we redo the request one sector
   1372		 * at a time in order to accurately determine which
   1373		 * sectors can be read successfully.
   1374		 */
   1375		if (disable_multi)
   1376			brq->data.blocks = 1;
   1377
   1378		/*
   1379		 * Some controllers have HW issues while operating
   1380		 * in multiple I/O mode
   1381		 */
   1382		if (card->host->ops->multi_io_quirk)
   1383			brq->data.blocks = card->host->ops->multi_io_quirk(card,
   1384						(rq_data_dir(req) == READ) ?
   1385						MMC_DATA_READ : MMC_DATA_WRITE,
   1386						brq->data.blocks);
   1387	}
   1388
   1389	if (do_rel_wr) {
   1390		mmc_apply_rel_rw(brq, card, req);
   1391		brq->data.flags |= MMC_DATA_REL_WR;
   1392	}
   1393
   1394	/*
    1395	 * The data tag is used only when writing metadata, to speed
    1396	 * up the write and any subsequent reads of that metadata.
   1397	 */
   1398	do_data_tag = card->ext_csd.data_tag_unit_size &&
   1399		      (req->cmd_flags & REQ_META) &&
   1400		      (rq_data_dir(req) == WRITE) &&
   1401		      ((brq->data.blocks * brq->data.blksz) >=
   1402		       card->ext_csd.data_tag_unit_size);
   1403
   1404	if (do_data_tag)
   1405		brq->data.flags |= MMC_DATA_DAT_TAG;
   1406
   1407	mmc_set_data_timeout(&brq->data, card);
   1408
   1409	brq->data.sg = mqrq->sg;
   1410	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
   1411
   1412	/*
   1413	 * Adjust the sg list so it is the same size as the
   1414	 * request.
   1415	 */
   1416	if (brq->data.blocks != blk_rq_sectors(req)) {
   1417		int i, data_size = brq->data.blocks << 9;
   1418		struct scatterlist *sg;
   1419
   1420		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
   1421			data_size -= sg->length;
   1422			if (data_size <= 0) {
   1423				sg->length += data_size;
   1424				i++;
   1425				break;
   1426			}
   1427		}
   1428		brq->data.sg_len = i;
   1429	}
   1430
   1431	if (do_rel_wr_p)
   1432		*do_rel_wr_p = do_rel_wr;
   1433
   1434	if (do_data_tag_p)
   1435		*do_data_tag_p = do_data_tag;
   1436}
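/*
 * Worked example (illustrative) for the sg trim above: if the transfer was
 * reduced to a single 512-byte block but the first sg entry covers 4096
 * bytes, then data_size becomes 512 - 4096 = -3584, that entry's length is
 * trimmed to 4096 + (-3584) = 512, and sg_len is cut to 1 so the sg list
 * matches what will actually be transferred.
 */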
   1437
   1438#define MMC_CQE_RETRIES 2
   1439
   1440static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
   1441{
   1442	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
   1443	struct mmc_request *mrq = &mqrq->brq.mrq;
   1444	struct request_queue *q = req->q;
   1445	struct mmc_host *host = mq->card->host;
   1446	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
   1447	unsigned long flags;
   1448	bool put_card;
   1449	int err;
   1450
   1451	mmc_cqe_post_req(host, mrq);
   1452
   1453	if (mrq->cmd && mrq->cmd->error)
   1454		err = mrq->cmd->error;
   1455	else if (mrq->data && mrq->data->error)
   1456		err = mrq->data->error;
   1457	else
   1458		err = 0;
   1459
   1460	if (err) {
   1461		if (mqrq->retries++ < MMC_CQE_RETRIES)
   1462			blk_mq_requeue_request(req, true);
   1463		else
   1464			blk_mq_end_request(req, BLK_STS_IOERR);
   1465	} else if (mrq->data) {
   1466		if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
   1467			blk_mq_requeue_request(req, true);
   1468		else
   1469			__blk_mq_end_request(req, BLK_STS_OK);
   1470	} else {
   1471		blk_mq_end_request(req, BLK_STS_OK);
   1472	}
   1473
   1474	spin_lock_irqsave(&mq->lock, flags);
   1475
   1476	mq->in_flight[issue_type] -= 1;
   1477
   1478	put_card = (mmc_tot_in_flight(mq) == 0);
   1479
   1480	mmc_cqe_check_busy(mq);
   1481
   1482	spin_unlock_irqrestore(&mq->lock, flags);
   1483
   1484	if (!mq->cqe_busy)
   1485		blk_mq_run_hw_queues(q, true);
   1486
   1487	if (put_card)
   1488		mmc_put_card(mq->card, &mq->ctx);
   1489}
   1490
   1491void mmc_blk_cqe_recovery(struct mmc_queue *mq)
   1492{
   1493	struct mmc_card *card = mq->card;
   1494	struct mmc_host *host = card->host;
   1495	int err;
   1496
   1497	pr_debug("%s: CQE recovery start\n", mmc_hostname(host));
   1498
   1499	err = mmc_cqe_recovery(host);
   1500	if (err)
   1501		mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
   1502	mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
   1503
   1504	pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
   1505}
   1506
   1507static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
   1508{
   1509	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
   1510						  brq.mrq);
   1511	struct request *req = mmc_queue_req_to_req(mqrq);
   1512	struct request_queue *q = req->q;
   1513	struct mmc_queue *mq = q->queuedata;
   1514
   1515	/*
    1516	 * Block layer timeouts race with completions, which means the normal
   1517	 * completion path cannot be used during recovery.
   1518	 */
   1519	if (mq->in_recovery)
   1520		mmc_blk_cqe_complete_rq(mq, req);
   1521	else if (likely(!blk_should_fake_timeout(req->q)))
   1522		blk_mq_complete_request(req);
   1523}
   1524
   1525static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
   1526{
   1527	mrq->done		= mmc_blk_cqe_req_done;
   1528	mrq->recovery_notifier	= mmc_cqe_recovery_notifier;
   1529
   1530	return mmc_cqe_start_req(host, mrq);
   1531}
   1532
   1533static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
   1534						 struct request *req)
   1535{
   1536	struct mmc_blk_request *brq = &mqrq->brq;
   1537
   1538	memset(brq, 0, sizeof(*brq));
   1539
   1540	brq->mrq.cmd = &brq->cmd;
   1541	brq->mrq.tag = req->tag;
   1542
   1543	return &brq->mrq;
   1544}
   1545
   1546static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
   1547{
   1548	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
   1549	struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);
   1550
   1551	mrq->cmd->opcode = MMC_SWITCH;
   1552	mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
   1553			(EXT_CSD_FLUSH_CACHE << 16) |
   1554			(1 << 8) |
   1555			EXT_CSD_CMD_SET_NORMAL;
   1556	mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;
   1557
   1558	return mmc_blk_cqe_start_req(mq->card->host, mrq);
   1559}
   1560
   1561static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
   1562{
   1563	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
   1564	struct mmc_host *host = mq->card->host;
   1565	int err;
   1566
   1567	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
   1568	mqrq->brq.mrq.done = mmc_blk_hsq_req_done;
   1569	mmc_pre_req(host, &mqrq->brq.mrq);
   1570
   1571	err = mmc_cqe_start_req(host, &mqrq->brq.mrq);
   1572	if (err)
   1573		mmc_post_req(host, &mqrq->brq.mrq, err);
   1574
   1575	return err;
   1576}
   1577
   1578static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
   1579{
   1580	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
   1581	struct mmc_host *host = mq->card->host;
   1582
   1583	if (host->hsq_enabled)
   1584		return mmc_blk_hsq_issue_rw_rq(mq, req);
   1585
   1586	mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
   1587
   1588	return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
   1589}
   1590
   1591static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
   1592			       struct mmc_card *card,
   1593			       int disable_multi,
   1594			       struct mmc_queue *mq)
   1595{
   1596	u32 readcmd, writecmd;
   1597	struct mmc_blk_request *brq = &mqrq->brq;
   1598	struct request *req = mmc_queue_req_to_req(mqrq);
   1599	struct mmc_blk_data *md = mq->blkdata;
   1600	bool do_rel_wr, do_data_tag;
   1601
   1602	mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
   1603
   1604	brq->mrq.cmd = &brq->cmd;
   1605
   1606	brq->cmd.arg = blk_rq_pos(req);
   1607	if (!mmc_card_blockaddr(card))
   1608		brq->cmd.arg <<= 9;
   1609	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
   1610
   1611	if (brq->data.blocks > 1 || do_rel_wr) {
   1612		/* SPI multiblock writes terminate using a special
   1613		 * token, not a STOP_TRANSMISSION request.
   1614		 */
   1615		if (!mmc_host_is_spi(card->host) ||
   1616		    rq_data_dir(req) == READ)
   1617			brq->mrq.stop = &brq->stop;
   1618		readcmd = MMC_READ_MULTIPLE_BLOCK;
   1619		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
   1620	} else {
   1621		brq->mrq.stop = NULL;
   1622		readcmd = MMC_READ_SINGLE_BLOCK;
   1623		writecmd = MMC_WRITE_BLOCK;
   1624	}
   1625	brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;
   1626
   1627	/*
   1628	 * Pre-defined multi-block transfers are preferable to
    1629	 * open-ended ones (and necessary for reliable writes).
   1630	 * However, it is not sufficient to just send CMD23,
   1631	 * and avoid the final CMD12, as on an error condition
   1632	 * CMD12 (stop) needs to be sent anyway. This, coupled
   1633	 * with Auto-CMD23 enhancements provided by some
   1634	 * hosts, means that the complexity of dealing
   1635	 * with this is best left to the host. If CMD23 is
   1636	 * supported by card and host, we'll fill sbc in and let
   1637	 * the host deal with handling it correctly. This means
   1638	 * that for hosts that don't expose MMC_CAP_CMD23, no
   1639	 * change of behavior will be observed.
   1640	 *
    1641	 * N.B.: Some MMC cards experience performance degradation.
   1642	 * We'll avoid using CMD23-bounded multiblock writes for
   1643	 * these, while retaining features like reliable writes.
   1644	 */
   1645	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
   1646	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
   1647	     do_data_tag)) {
   1648		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
   1649		brq->sbc.arg = brq->data.blocks |
   1650			(do_rel_wr ? (1 << 31) : 0) |
   1651			(do_data_tag ? (1 << 29) : 0);
   1652		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
   1653		brq->mrq.sbc = &brq->sbc;
   1654	}
   1655}
   1656
   1657#define MMC_MAX_RETRIES		5
   1658#define MMC_DATA_RETRIES	2
   1659#define MMC_NO_RETRIES		(MMC_MAX_RETRIES + 1)
   1660
   1661static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout)
   1662{
   1663	struct mmc_command cmd = {
   1664		.opcode = MMC_STOP_TRANSMISSION,
   1665		.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC,
   1666		/* Some hosts wait for busy anyway, so provide a busy timeout */
   1667		.busy_timeout = timeout,
   1668	};
   1669
   1670	return mmc_wait_for_cmd(card->host, &cmd, 5);
   1671}
   1672
   1673static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
   1674{
   1675	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
   1676	struct mmc_blk_request *brq = &mqrq->brq;
   1677	unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data);
   1678	int err;
   1679
   1680	mmc_retune_hold_now(card->host);
   1681
   1682	mmc_blk_send_stop(card, timeout);
   1683
   1684	err = mmc_poll_for_busy(card, timeout, false, MMC_BUSY_IO);
   1685
   1686	mmc_retune_release(card->host);
   1687
   1688	return err;
   1689}
   1690
   1691#define MMC_READ_SINGLE_RETRIES	2
   1692
   1693/* Single sector read during recovery */
   1694static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
   1695{
   1696	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
   1697	struct mmc_request *mrq = &mqrq->brq.mrq;
   1698	struct mmc_card *card = mq->card;
   1699	struct mmc_host *host = card->host;
   1700	blk_status_t error = BLK_STS_OK;
   1701
   1702	do {
   1703		u32 status;
   1704		int err;
   1705		int retries = 0;
   1706
   1707		while (retries++ <= MMC_READ_SINGLE_RETRIES) {
   1708			mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
   1709
   1710			mmc_wait_for_req(host, mrq);
   1711
   1712			err = mmc_send_status(card, &status);
   1713			if (err)
   1714				goto error_exit;
   1715
   1716			if (!mmc_host_is_spi(host) &&
   1717			    !mmc_ready_for_data(status)) {
   1718				err = mmc_blk_fix_state(card, req);
   1719				if (err)
   1720					goto error_exit;
   1721			}
   1722
   1723			if (!mrq->cmd->error)
   1724				break;
   1725		}
   1726
   1727		if (mrq->cmd->error ||
   1728		    mrq->data->error ||
   1729		    (!mmc_host_is_spi(host) &&
   1730		     (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS)))
   1731			error = BLK_STS_IOERR;
   1732		else
   1733			error = BLK_STS_OK;
   1734
   1735	} while (blk_update_request(req, error, 512));
   1736
   1737	return;
   1738
   1739error_exit:
   1740	mrq->data->bytes_xfered = 0;
   1741	blk_update_request(req, BLK_STS_IOERR, 512);
   1742	/* Let it try the remaining request again */
   1743	if (mqrq->retries > MMC_MAX_RETRIES - 1)
   1744		mqrq->retries = MMC_MAX_RETRIES - 1;
   1745}
   1746
   1747static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
   1748{
   1749	return !!brq->mrq.sbc;
   1750}
   1751
   1752static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq)
   1753{
   1754	return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR;
   1755}
   1756
   1757/*
   1758 * Check for errors the host controller driver might not have seen, such as
   1759 * response mode errors or invalid card state.
   1760 */
   1761static bool mmc_blk_status_error(struct request *req, u32 status)
   1762{
   1763	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
   1764	struct mmc_blk_request *brq = &mqrq->brq;
   1765	struct mmc_queue *mq = req->q->queuedata;
   1766	u32 stop_err_bits;
   1767
   1768	if (mmc_host_is_spi(mq->card->host))
   1769		return false;
   1770
   1771	stop_err_bits = mmc_blk_stop_err_bits(brq);
   1772
   1773	return brq->cmd.resp[0]  & CMD_ERRORS    ||
   1774	       brq->stop.resp[0] & stop_err_bits ||
   1775	       status            & stop_err_bits ||
   1776	       (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status));
   1777}
   1778
   1779static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
   1780{
   1781	return !brq->sbc.error && !brq->cmd.error &&
   1782	       !(brq->cmd.resp[0] & CMD_ERRORS);
   1783}
   1784
   1785/*
   1786 * Requests are completed by mmc_blk_mq_complete_rq() which applies a simple
   1787 * policy:
   1788 * 1. A request that has transferred at least some data is considered
   1789 * successful and will be requeued if there is remaining data to
   1790 * transfer.
   1791 * 2. Otherwise the number of retries is incremented and the request
   1792 * will be requeued if there are remaining retries.
   1793 * 3. Otherwise the request will be errored out.
   1794 * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and
   1795 * mqrq->retries. So there are only 4 possible actions here:
   1796 *	1. do not accept the bytes_xfered value i.e. set it to zero
   1797 *	2. change mqrq->retries to determine the number of retries
   1798 *	3. try to reset the card
   1799 *	4. read one sector at a time
   1800 */
   1801static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
   1802{
   1803	int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
   1804	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
   1805	struct mmc_blk_request *brq = &mqrq->brq;
   1806	struct mmc_blk_data *md = mq->blkdata;
   1807	struct mmc_card *card = mq->card;
   1808	u32 status;
   1809	u32 blocks;
   1810	int err;
   1811
   1812	/*
   1813	 * Some errors the host driver might not have seen. Set the number of
   1814	 * bytes transferred to zero in that case.
   1815	 */
   1816	err = __mmc_send_status(card, &status, 0);
   1817	if (err || mmc_blk_status_error(req, status))
   1818		brq->data.bytes_xfered = 0;
   1819
   1820	mmc_retune_release(card->host);
   1821
   1822	/*
   1823	 * Try again to get the status. This also provides an opportunity for
   1824	 * re-tuning.
   1825	 */
   1826	if (err)
   1827		err = __mmc_send_status(card, &status, 0);
   1828
   1829	/*
   1830	 * If the card has been removed, there is nothing more to do once the
   1831	 * number of bytes transferred has been updated.
   1832	 */
   1833	if (err && mmc_detect_card_removed(card->host))
   1834		return;
   1835
   1836	/* Try to get back to "tran" state */
   1837	if (!mmc_host_is_spi(mq->card->host) &&
   1838	    (err || !mmc_ready_for_data(status)))
   1839		err = mmc_blk_fix_state(mq->card, req);
   1840
   1841	/*
   1842	 * Special case for SD cards where the card might record the number of
   1843	 * blocks written.
   1844	 */
   1845	if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) &&
   1846	    rq_data_dir(req) == WRITE) {
   1847		if (mmc_sd_num_wr_blocks(card, &blocks))
   1848			brq->data.bytes_xfered = 0;
   1849		else
   1850			brq->data.bytes_xfered = blocks << 9;
   1851	}
   1852
   1853	/* Reset if the card is in a bad state */
   1854	if (!mmc_host_is_spi(mq->card->host) &&
   1855	    err && mmc_blk_reset(md, card->host, type)) {
   1856		pr_err("%s: recovery failed!\n", req->q->disk->disk_name);
   1857		mqrq->retries = MMC_NO_RETRIES;
   1858		return;
   1859	}
   1860
   1861	/*
   1862	 * If any data was transferred, just return; anything remaining on
   1863	 * the request will get requeued.
   1864	 */
   1865	if (brq->data.bytes_xfered)
   1866		return;
   1867
   1868	/* Reset before last retry */
   1869	if (mqrq->retries + 1 == MMC_MAX_RETRIES)
   1870		mmc_blk_reset(md, card->host, type);
   1871
   1872	/* Command errors fail fast, so use all MMC_MAX_RETRIES */
   1873	if (brq->sbc.error || brq->cmd.error)
   1874		return;
   1875
   1876	/* Reduce the remaining retries for data errors */
   1877	if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) {
   1878		mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES;
   1879		return;
   1880	}
   1881
   1882	/* FIXME: Missing single sector read for large sector size */
   1883	if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
   1884	    brq->data.blocks > 1) {
   1885		/* Read one sector at a time */
   1886		mmc_blk_read_single(mq, req);
   1887		return;
   1888	}
   1889}
   1890
   1891static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
   1892{
   1893	mmc_blk_eval_resp_error(brq);
   1894
   1895	return brq->sbc.error || brq->cmd.error || brq->stop.error ||
   1896	       brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
   1897}
   1898
   1899static int mmc_spi_err_check(struct mmc_card *card)
   1900{
   1901	u32 status = 0;
   1902	int err;
   1903
   1904	/*
   1905	 * SPI does not have a TRAN state we have to wait on; instead the
   1906	 * card is ready again when it no longer holds the line LOW.
   1907	 * We still have to ensure two things here before we know the write
   1908	 * was successful:
   1909	 * 1. The card could have disconnected during busy, in which case we
   1910	 * were reading our own pull-up while thinking it was still connected,
   1911	 * so ensure it still responds.
   1912	 * 2. Check for any error bits, in particular R1_SPI_IDLE to catch a
   1913	 * card that has just reconnected after being disconnected during busy.
   1914	 */
   1915	err = __mmc_send_status(card, &status, 0);
   1916	if (err)
   1917		return err;
   1918	/* All R1 and R2 bits of SPI are errors in our case */
   1919	if (status)
   1920		return -EIO;
   1921	return 0;
   1922}
   1923
   1924static int mmc_blk_busy_cb(void *cb_data, bool *busy)
   1925{
   1926	struct mmc_blk_busy_data *data = cb_data;
   1927	u32 status = 0;
   1928	int err;
   1929
   1930	err = mmc_send_status(data->card, &status);
   1931	if (err)
   1932		return err;
   1933
   1934	/* Accumulate response error bits. */
   1935	data->status |= status;
   1936
   1937	*busy = !mmc_ready_for_data(status);
   1938	return 0;
   1939}
   1940
   1941static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
   1942{
   1943	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
   1944	struct mmc_blk_busy_data cb_data;
   1945	int err;
   1946
   1947	if (rq_data_dir(req) == READ)
   1948		return 0;
   1949
   1950	if (mmc_host_is_spi(card->host)) {
   1951		err = mmc_spi_err_check(card);
   1952		if (err)
   1953			mqrq->brq.data.bytes_xfered = 0;
   1954		return err;
   1955	}
   1956
   1957	cb_data.card = card;
   1958	cb_data.status = 0;
   1959	err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
   1960				  &mmc_blk_busy_cb, &cb_data);
   1961
   1962	/*
   1963	 * Do not assume data transferred correctly if there are any error bits
   1964	 * set.
   1965	 */
   1966	if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) {
   1967		mqrq->brq.data.bytes_xfered = 0;
   1968		err = err ? err : -EIO;
   1969	}
   1970
   1971	/* Copy the exception bit so it will be seen later on */
   1972	if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT)
   1973		mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;
   1974
   1975	return err;
   1976}
   1977
   1978static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
   1979					    struct request *req)
   1980{
   1981	int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
   1982
   1983	mmc_blk_reset_success(mq->blkdata, type);
   1984}
   1985
   1986static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
   1987{
   1988	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
   1989	unsigned int nr_bytes = mqrq->brq.data.bytes_xfered;
   1990
   1991	if (nr_bytes) {
   1992		if (blk_update_request(req, BLK_STS_OK, nr_bytes))
   1993			blk_mq_requeue_request(req, true);
   1994		else
   1995			__blk_mq_end_request(req, BLK_STS_OK);
   1996	} else if (!blk_rq_bytes(req)) {
   1997		__blk_mq_end_request(req, BLK_STS_IOERR);
   1998	} else if (mqrq->retries++ < MMC_MAX_RETRIES) {
   1999		blk_mq_requeue_request(req, true);
   2000	} else {
   2001		if (mmc_card_removed(mq->card))
   2002			req->rq_flags |= RQF_QUIET;
   2003		blk_mq_end_request(req, BLK_STS_IOERR);
   2004	}
   2005}
   2006
   2007static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq,
   2008					struct mmc_queue_req *mqrq)
   2009{
   2010	return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) &&
   2011	       (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT ||
   2012		mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT);
   2013}
   2014
   2015static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
   2016				 struct mmc_queue_req *mqrq)
   2017{
   2018	if (mmc_blk_urgent_bkops_needed(mq, mqrq))
   2019		mmc_run_bkops(mq->card);
   2020}
   2021
   2022static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
   2023{
   2024	struct mmc_queue_req *mqrq =
   2025		container_of(mrq, struct mmc_queue_req, brq.mrq);
   2026	struct request *req = mmc_queue_req_to_req(mqrq);
   2027	struct request_queue *q = req->q;
   2028	struct mmc_queue *mq = q->queuedata;
   2029	struct mmc_host *host = mq->card->host;
   2030	unsigned long flags;
   2031
   2032	if (mmc_blk_rq_error(&mqrq->brq) ||
   2033	    mmc_blk_urgent_bkops_needed(mq, mqrq)) {
   2034		spin_lock_irqsave(&mq->lock, flags);
   2035		mq->recovery_needed = true;
   2036		mq->recovery_req = req;
   2037		spin_unlock_irqrestore(&mq->lock, flags);
   2038
   2039		host->cqe_ops->cqe_recovery_start(host);
   2040
   2041		schedule_work(&mq->recovery_work);
   2042		return;
   2043	}
   2044
   2045	mmc_blk_rw_reset_success(mq, req);
   2046
   2047	/*
   2048	 * Block layer timeouts race with completions which means the normal
   2049	 * completion path cannot be used during recovery.
   2050	 */
   2051	if (mq->in_recovery)
   2052		mmc_blk_cqe_complete_rq(mq, req);
   2053	else if (likely(!blk_should_fake_timeout(req->q)))
   2054		blk_mq_complete_request(req);
   2055}
   2056
   2057void mmc_blk_mq_complete(struct request *req)
   2058{
   2059	struct mmc_queue *mq = req->q->queuedata;
   2060	struct mmc_host *host = mq->card->host;
   2061
   2062	if (host->cqe_enabled)
   2063		mmc_blk_cqe_complete_rq(mq, req);
   2064	else if (likely(!blk_should_fake_timeout(req->q)))
   2065		mmc_blk_mq_complete_rq(mq, req);
   2066}
   2067
   2068static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
   2069				       struct request *req)
   2070{
   2071	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
   2072	struct mmc_host *host = mq->card->host;
   2073
   2074	if (mmc_blk_rq_error(&mqrq->brq) ||
   2075	    mmc_blk_card_busy(mq->card, req)) {
   2076		mmc_blk_mq_rw_recovery(mq, req);
   2077	} else {
   2078		mmc_blk_rw_reset_success(mq, req);
   2079		mmc_retune_release(host);
   2080	}
   2081
   2082	mmc_blk_urgent_bkops(mq, mqrq);
   2083}
   2084
   2085static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
   2086{
   2087	unsigned long flags;
   2088	bool put_card;
   2089
   2090	spin_lock_irqsave(&mq->lock, flags);
   2091
   2092	mq->in_flight[mmc_issue_type(mq, req)] -= 1;
   2093
   2094	put_card = (mmc_tot_in_flight(mq) == 0);
   2095
   2096	spin_unlock_irqrestore(&mq->lock, flags);
   2097
   2098	if (put_card)
   2099		mmc_put_card(mq->card, &mq->ctx);
   2100}
   2101
   2102static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
   2103				bool can_sleep)
   2104{
   2105	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
   2106	struct mmc_request *mrq = &mqrq->brq.mrq;
   2107	struct mmc_host *host = mq->card->host;
   2108
   2109	mmc_post_req(host, mrq, 0);
   2110
   2111	/*
   2112	 * Block layer timeouts race with completions which means the normal
   2113	 * completion path cannot be used during recovery.
   2114	 */
   2115	if (mq->in_recovery) {
   2116		mmc_blk_mq_complete_rq(mq, req);
   2117	} else if (likely(!blk_should_fake_timeout(req->q))) {
   2118		if (can_sleep)
   2119			blk_mq_complete_request_direct(req, mmc_blk_mq_complete);
   2120		else
   2121			blk_mq_complete_request(req);
   2122	}
   2123
   2124	mmc_blk_mq_dec_in_flight(mq, req);
   2125}
   2126
   2127void mmc_blk_mq_recovery(struct mmc_queue *mq)
   2128{
   2129	struct request *req = mq->recovery_req;
   2130	struct mmc_host *host = mq->card->host;
   2131	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
   2132
   2133	mq->recovery_req = NULL;
   2134	mq->rw_wait = false;
   2135
   2136	if (mmc_blk_rq_error(&mqrq->brq)) {
   2137		mmc_retune_hold_now(host);
   2138		mmc_blk_mq_rw_recovery(mq, req);
   2139	}
   2140
   2141	mmc_blk_urgent_bkops(mq, mqrq);
   2142
   2143	mmc_blk_mq_post_req(mq, req, true);
   2144}
   2145
   2146static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
   2147					 struct request **prev_req)
   2148{
   2149	if (mmc_host_done_complete(mq->card->host))
   2150		return;
   2151
   2152	mutex_lock(&mq->complete_lock);
   2153
   2154	if (!mq->complete_req)
   2155		goto out_unlock;
   2156
   2157	mmc_blk_mq_poll_completion(mq, mq->complete_req);
   2158
   2159	if (prev_req)
   2160		*prev_req = mq->complete_req;
   2161	else
   2162		mmc_blk_mq_post_req(mq, mq->complete_req, true);
   2163
   2164	mq->complete_req = NULL;
   2165
   2166out_unlock:
   2167	mutex_unlock(&mq->complete_lock);
   2168}
   2169
   2170void mmc_blk_mq_complete_work(struct work_struct *work)
   2171{
   2172	struct mmc_queue *mq = container_of(work, struct mmc_queue,
   2173					    complete_work);
   2174
   2175	mmc_blk_mq_complete_prev_req(mq, NULL);
   2176}
   2177
   2178static void mmc_blk_mq_req_done(struct mmc_request *mrq)
   2179{
   2180	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
   2181						  brq.mrq);
   2182	struct request *req = mmc_queue_req_to_req(mqrq);
   2183	struct request_queue *q = req->q;
   2184	struct mmc_queue *mq = q->queuedata;
   2185	struct mmc_host *host = mq->card->host;
   2186	unsigned long flags;
   2187
   2188	if (!mmc_host_done_complete(host)) {
   2189		bool waiting;
   2190
   2191		/*
   2192		 * We cannot complete the request in this context, so record
   2193		 * that there is a request to complete, and that a following
   2194		 * request does not need to wait (although it does need to
   2195		 * complete complete_req first).
   2196		 */
   2197		spin_lock_irqsave(&mq->lock, flags);
   2198		mq->complete_req = req;
   2199		mq->rw_wait = false;
   2200		waiting = mq->waiting;
   2201		spin_unlock_irqrestore(&mq->lock, flags);
   2202
   2203		/*
   2204		 * If 'waiting' then the waiting task will complete this
   2205		 * request, otherwise queue a work to do it. Note that
   2206		 * complete_work may still race with the dispatch of a following
   2207		 * request.
   2208		 */
   2209		if (waiting)
   2210			wake_up(&mq->wait);
   2211		else
   2212			queue_work(mq->card->complete_wq, &mq->complete_work);
   2213
   2214		return;
   2215	}
   2216
   2217	/* Take the recovery path for errors or urgent background operations */
   2218	if (mmc_blk_rq_error(&mqrq->brq) ||
   2219	    mmc_blk_urgent_bkops_needed(mq, mqrq)) {
   2220		spin_lock_irqsave(&mq->lock, flags);
   2221		mq->recovery_needed = true;
   2222		mq->recovery_req = req;
   2223		spin_unlock_irqrestore(&mq->lock, flags);
   2224		wake_up(&mq->wait);
   2225		schedule_work(&mq->recovery_work);
   2226		return;
   2227	}
   2228
   2229	mmc_blk_rw_reset_success(mq, req);
   2230
   2231	mq->rw_wait = false;
   2232	wake_up(&mq->wait);
   2233
   2234	/* context unknown */
   2235	mmc_blk_mq_post_req(mq, req, false);
   2236}
   2237
   2238static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
   2239{
   2240	unsigned long flags;
   2241	bool done;
   2242
   2243	/*
   2244	 * Wait while there is another request in progress, but not if recovery
   2245	 * is needed. Also indicate whether there is a request waiting to start.
   2246	 */
   2247	spin_lock_irqsave(&mq->lock, flags);
   2248	if (mq->recovery_needed) {
   2249		*err = -EBUSY;
   2250		done = true;
   2251	} else {
   2252		done = !mq->rw_wait;
   2253	}
   2254	mq->waiting = !done;
   2255	spin_unlock_irqrestore(&mq->lock, flags);
   2256
   2257	return done;
   2258}
   2259
   2260static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req)
   2261{
   2262	int err = 0;
   2263
   2264	wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err));
   2265
   2266	/* Always complete the previous request if there is one */
   2267	mmc_blk_mq_complete_prev_req(mq, prev_req);
   2268
   2269	return err;
   2270}
   2271
   2272static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
   2273				  struct request *req)
   2274{
   2275	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
   2276	struct mmc_host *host = mq->card->host;
   2277	struct request *prev_req = NULL;
   2278	int err = 0;
   2279
   2280	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
   2281
   2282	mqrq->brq.mrq.done = mmc_blk_mq_req_done;
   2283
   2284	mmc_pre_req(host, &mqrq->brq.mrq);
   2285
   2286	err = mmc_blk_rw_wait(mq, &prev_req);
   2287	if (err)
   2288		goto out_post_req;
   2289
   2290	mq->rw_wait = true;
   2291
   2292	err = mmc_start_request(host, &mqrq->brq.mrq);
   2293
   2294	if (prev_req)
   2295		mmc_blk_mq_post_req(mq, prev_req, true);
   2296
   2297	if (err)
   2298		mq->rw_wait = false;
   2299
   2300	/* Release re-tuning here where there is no synchronization required */
   2301	if (err || mmc_host_done_complete(host))
   2302		mmc_retune_release(host);
   2303
   2304out_post_req:
   2305	if (err)
   2306		mmc_post_req(host, &mqrq->brq.mrq, err);
   2307
   2308	return err;
   2309}
   2310
   2311static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
   2312{
   2313	if (host->cqe_enabled)
   2314		return host->cqe_ops->cqe_wait_for_idle(host);
   2315
   2316	return mmc_blk_rw_wait(mq, NULL);
   2317}
   2318
   2319enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
   2320{
   2321	struct mmc_blk_data *md = mq->blkdata;
   2322	struct mmc_card *card = md->queue.card;
   2323	struct mmc_host *host = card->host;
   2324	int ret;
   2325
   2326	ret = mmc_blk_part_switch(card, md->part_type);
   2327	if (ret)
   2328		return MMC_REQ_FAILED_TO_START;
   2329
   2330	switch (mmc_issue_type(mq, req)) {
   2331	case MMC_ISSUE_SYNC:
   2332		ret = mmc_blk_wait_for_idle(mq, host);
   2333		if (ret)
   2334			return MMC_REQ_BUSY;
   2335		switch (req_op(req)) {
   2336		case REQ_OP_DRV_IN:
   2337		case REQ_OP_DRV_OUT:
   2338			mmc_blk_issue_drv_op(mq, req);
   2339			break;
   2340		case REQ_OP_DISCARD:
   2341			mmc_blk_issue_discard_rq(mq, req);
   2342			break;
   2343		case REQ_OP_SECURE_ERASE:
   2344			mmc_blk_issue_secdiscard_rq(mq, req);
   2345			break;
   2346		case REQ_OP_WRITE_ZEROES:
   2347			mmc_blk_issue_trim_rq(mq, req);
   2348			break;
   2349		case REQ_OP_FLUSH:
   2350			mmc_blk_issue_flush(mq, req);
   2351			break;
   2352		default:
   2353			WARN_ON_ONCE(1);
   2354			return MMC_REQ_FAILED_TO_START;
   2355		}
   2356		return MMC_REQ_FINISHED;
   2357	case MMC_ISSUE_DCMD:
   2358	case MMC_ISSUE_ASYNC:
   2359		switch (req_op(req)) {
   2360		case REQ_OP_FLUSH:
   2361			if (!mmc_cache_enabled(host)) {
   2362				blk_mq_end_request(req, BLK_STS_OK);
   2363				return MMC_REQ_FINISHED;
   2364			}
   2365			ret = mmc_blk_cqe_issue_flush(mq, req);
   2366			break;
   2367		case REQ_OP_READ:
   2368		case REQ_OP_WRITE:
   2369			if (host->cqe_enabled)
   2370				ret = mmc_blk_cqe_issue_rw_rq(mq, req);
   2371			else
   2372				ret = mmc_blk_mq_issue_rw_rq(mq, req);
   2373			break;
   2374		default:
   2375			WARN_ON_ONCE(1);
   2376			ret = -EINVAL;
   2377		}
   2378		if (!ret)
   2379			return MMC_REQ_STARTED;
   2380		return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START;
   2381	default:
   2382		WARN_ON_ONCE(1);
   2383		return MMC_REQ_FAILED_TO_START;
   2384	}
   2385}
   2386
   2387static inline int mmc_blk_readonly(struct mmc_card *card)
   2388{
   2389	return mmc_card_readonly(card) ||
   2390	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
   2391}
   2392
   2393static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
   2394					      struct device *parent,
   2395					      sector_t size,
   2396					      bool default_ro,
   2397					      const char *subname,
   2398					      int area_type,
   2399					      unsigned int part_type)
   2400{
   2401	struct mmc_blk_data *md;
   2402	int devidx, ret;
   2403	char cap_str[10];
   2404	bool cache_enabled = false;
   2405	bool fua_enabled = false;
   2406
   2407	devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
   2408	if (devidx < 0) {
   2409		/*
   2410		 * We get -ENOSPC because there are no more devidx values
   2411		 * available. The reason may be that either userspace has not yet
   2412		 * unmounted the partitions, which postpones mmc_blk_release()
   2413		 * from being called, or the device has more partitions than
   2414		 * we support.
   2415		 */
   2416		if (devidx == -ENOSPC)
   2417			dev_err(mmc_dev(card->host),
   2418				"no more device IDs available\n");
   2419
   2420		return ERR_PTR(devidx);
   2421	}
   2422
   2423	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
   2424	if (!md) {
   2425		ret = -ENOMEM;
   2426		goto out;
   2427	}
   2428
   2429	md->area_type = area_type;
   2430
   2431	/*
   2432	 * Set the read-only status based on the supported commands
   2433	 * and the write protect switch.
   2434	 */
   2435	md->read_only = mmc_blk_readonly(card);
   2436
   2437	md->disk = mmc_init_queue(&md->queue, card);
   2438	if (IS_ERR(md->disk)) {
   2439		ret = PTR_ERR(md->disk);
   2440		goto err_kfree;
   2441	}
   2442
   2443	INIT_LIST_HEAD(&md->part);
   2444	INIT_LIST_HEAD(&md->rpmbs);
   2445	kref_init(&md->kref);
   2446
   2447	md->queue.blkdata = md;
   2448	md->part_type = part_type;
   2449
   2450	md->disk->major	= MMC_BLOCK_MAJOR;
   2451	md->disk->minors = perdev_minors;
   2452	md->disk->first_minor = devidx * perdev_minors;
   2453	md->disk->fops = &mmc_bdops;
   2454	md->disk->private_data = md;
   2455	md->parent = parent;
   2456	set_disk_ro(md->disk, md->read_only || default_ro);
   2457	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
   2458		md->disk->flags |= GENHD_FL_NO_PART;
   2459
   2460	/*
   2461	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
   2462	 *
   2463	 * - be set for removable media with permanent block devices
   2464	 * - be unset for removable block devices with permanent media
   2465	 *
   2466	 * Since MMC block devices clearly fall under the second
   2467	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
   2468	 * should use the block device creation/destruction hotplug
   2469	 * messages to tell when the card is present.
   2470	 */
   2471
   2472	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
   2473		 "mmcblk%u%s", card->host->index, subname ? subname : "");
   2474
   2475	set_capacity(md->disk, size);
   2476
   2477	if (mmc_host_cmd23(card->host)) {
   2478		if ((mmc_card_mmc(card) &&
   2479		     card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
   2480		    (mmc_card_sd(card) &&
   2481		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
   2482			md->flags |= MMC_BLK_CMD23;
   2483	}
   2484
   2485	if (md->flags & MMC_BLK_CMD23 &&
   2486	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
   2487	     card->ext_csd.rel_sectors)) {
   2488		md->flags |= MMC_BLK_REL_WR;
   2489		fua_enabled = true;
   2490		cache_enabled = true;
   2491	}
   2492	if (mmc_cache_enabled(card->host))
   2493		cache_enabled  = true;
   2494
   2495	blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled);
   2496
   2497	string_get_size((u64)size, 512, STRING_UNITS_2,
   2498			cap_str, sizeof(cap_str));
   2499	pr_info("%s: %s %s %s %s\n",
   2500		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
   2501		cap_str, md->read_only ? "(ro)" : "");
   2502
   2503	/* used in ->open, must be set before add_disk: */
   2504	if (area_type == MMC_BLK_DATA_AREA_MAIN)
   2505		dev_set_drvdata(&card->dev, md);
   2506	ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
   2507	if (ret)
   2508		goto err_cleanup_queue;
   2509	return md;
   2510
   2511 err_cleanup_queue:
   2512	blk_cleanup_queue(md->disk->queue);
   2513	blk_mq_free_tag_set(&md->queue.tag_set);
   2514 err_kfree:
   2515	kfree(md);
   2516 out:
   2517	ida_simple_remove(&mmc_blk_ida, devidx);
   2518	return ERR_PTR(ret);
   2519}
   2520
   2521static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
   2522{
   2523	sector_t size;
   2524
   2525	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
   2526		/*
   2527		 * The EXT_CSD sector count is in units of 512 byte
   2528		 * sectors.
   2529		 */
   2530		size = card->ext_csd.sectors;
   2531	} else {
   2532		/*
   2533		 * The CSD capacity field is in units of read_blkbits.
   2534		 * set_capacity takes units of 512 bytes.
   2535		 */
   2536		size = (typeof(sector_t))card->csd.capacity
   2537			<< (card->csd.read_blkbits - 9);
   2538	}
   2539
   2540	return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
   2541					MMC_BLK_DATA_AREA_MAIN, 0);
   2542}
   2543
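/*
 * A minimal sketch of the capacity conversion used in mmc_blk_alloc() above:
 * the CSD capacity field counts blocks of (1 << read_blkbits) bytes, while
 * set_capacity() expects 512-byte sectors, hence the shift by
 * (read_blkbits - 9). The helper name is hypothetical; it only restates the
 * arithmetic.
 */
#if 0	/* illustrative only, not compiled */
#include <linux/types.h>

static sector_t example_csd_capacity_to_sectors(u32 capacity,
						unsigned int read_blkbits)
{
	/*
	 * Example: capacity = 1000 blocks of 1024 bytes (read_blkbits = 10)
	 * becomes 1000 << (10 - 9) = 2000 sectors of 512 bytes.
	 */
	return (sector_t)capacity << (read_blkbits - 9);
}
#endif
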
   2544static int mmc_blk_alloc_part(struct mmc_card *card,
   2545			      struct mmc_blk_data *md,
   2546			      unsigned int part_type,
   2547			      sector_t size,
   2548			      bool default_ro,
   2549			      const char *subname,
   2550			      int area_type)
   2551{
   2552	struct mmc_blk_data *part_md;
   2553
   2554	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
   2555				    subname, area_type, part_type);
   2556	if (IS_ERR(part_md))
   2557		return PTR_ERR(part_md);
   2558	list_add(&part_md->part, &md->part);
   2559
   2560	return 0;
   2561}
   2562
   2563/**
   2564 * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev
   2565 * @filp: the character device file
   2566 * @cmd: the ioctl() command
   2567 * @arg: the argument from userspace
   2568 *
   2569 * This will essentially just redirect the ioctl()s coming in over to
   2570 * the main block device spawning the RPMB character device.
   2571 */
   2572static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
   2573			   unsigned long arg)
   2574{
   2575	struct mmc_rpmb_data *rpmb = filp->private_data;
   2576	int ret;
   2577
   2578	switch (cmd) {
   2579	case MMC_IOC_CMD:
   2580		ret = mmc_blk_ioctl_cmd(rpmb->md,
   2581					(struct mmc_ioc_cmd __user *)arg,
   2582					rpmb);
   2583		break;
   2584	case MMC_IOC_MULTI_CMD:
   2585		ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
   2586					(struct mmc_ioc_multi_cmd __user *)arg,
   2587					rpmb);
   2588		break;
   2589	default:
   2590		ret = -EINVAL;
   2591		break;
   2592	}
   2593
   2594	return ret;
   2595}
   2596
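/*
 * A minimal userspace sketch (not part of this driver) of how the RPMB
 * character device is driven: open the chardev and push one 512-byte RPMB
 * frame through MMC_IOC_CMD, which lands in mmc_rpmb_ioctl() above. The
 * device path, the READ_MULTIPLE_BLOCK opcode value and the locally defined
 * flag values are assumptions mirroring mmc-utils; building a valid RPMB
 * request frame (request type, nonce, MAC) is out of scope here.
 */
#if 0	/* illustrative userspace code, not compiled into the kernel */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/mmc/ioctl.h>

/* Assumed to mirror the kernel's response/command type flags */
#define EX_MMC_RSP_PRESENT	(1 << 0)
#define EX_MMC_RSP_CRC		(1 << 2)
#define EX_MMC_RSP_OPCODE	(1 << 4)
#define EX_MMC_RSP_R1		(EX_MMC_RSP_PRESENT | EX_MMC_RSP_CRC | EX_MMC_RSP_OPCODE)
#define EX_MMC_CMD_ADTC		(1 << 5)

int main(void)
{
	unsigned char frame[512] = { 0 };	/* one RPMB data frame */
	struct mmc_ioc_cmd ic = { 0 };
	int fd, ret;

	fd = open("/dev/mmcblk0rpmb", O_RDWR);	/* assumed device path */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	ic.write_flag = 0;			/* read back a result frame */
	ic.opcode = 18;				/* READ_MULTIPLE_BLOCK (CMD18) */
	ic.flags = EX_MMC_RSP_R1 | EX_MMC_CMD_ADTC;
	ic.blksz = 512;
	ic.blocks = 1;
	mmc_ioc_cmd_set_data(ic, frame);	/* point the ioctl at the frame */

	ret = ioctl(fd, MMC_IOC_CMD, &ic);	/* redirected to the RPMB area */
	if (ret)
		perror("MMC_IOC_CMD");

	close(fd);
	return ret ? 1 : 0;
}
#endif
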
   2597#ifdef CONFIG_COMPAT
   2598static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd,
   2599			      unsigned long arg)
   2600{
   2601	return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
   2602}
   2603#endif
   2604
   2605static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
   2606{
   2607	struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
   2608						  struct mmc_rpmb_data, chrdev);
   2609
   2610	get_device(&rpmb->dev);
   2611	filp->private_data = rpmb;
   2612	mmc_blk_get(rpmb->md->disk);
   2613
   2614	return nonseekable_open(inode, filp);
   2615}
   2616
   2617static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
   2618{
   2619	struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
   2620						  struct mmc_rpmb_data, chrdev);
   2621
   2622	mmc_blk_put(rpmb->md);
   2623	put_device(&rpmb->dev);
   2624
   2625	return 0;
   2626}
   2627
   2628static const struct file_operations mmc_rpmb_fileops = {
   2629	.release = mmc_rpmb_chrdev_release,
   2630	.open = mmc_rpmb_chrdev_open,
   2631	.owner = THIS_MODULE,
   2632	.llseek = no_llseek,
   2633	.unlocked_ioctl = mmc_rpmb_ioctl,
   2634#ifdef CONFIG_COMPAT
   2635	.compat_ioctl = mmc_rpmb_ioctl_compat,
   2636#endif
   2637};
   2638
   2639static void mmc_blk_rpmb_device_release(struct device *dev)
   2640{
   2641	struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
   2642
   2643	ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
   2644	kfree(rpmb);
   2645}
   2646
   2647static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
   2648				   struct mmc_blk_data *md,
   2649				   unsigned int part_index,
   2650				   sector_t size,
   2651				   const char *subname)
   2652{
   2653	int devidx, ret;
   2654	char rpmb_name[DISK_NAME_LEN];
   2655	char cap_str[10];
   2656	struct mmc_rpmb_data *rpmb;
   2657
   2658	/* This creates the minor number for the RPMB char device */
   2659	devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
   2660	if (devidx < 0)
   2661		return devidx;
   2662
   2663	rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
   2664	if (!rpmb) {
   2665		ida_simple_remove(&mmc_rpmb_ida, devidx);
   2666		return -ENOMEM;
   2667	}
   2668
   2669	snprintf(rpmb_name, sizeof(rpmb_name),
   2670		 "mmcblk%u%s", card->host->index, subname ? subname : "");
   2671
   2672	rpmb->id = devidx;
   2673	rpmb->part_index = part_index;
   2674	rpmb->dev.init_name = rpmb_name;
   2675	rpmb->dev.bus = &mmc_rpmb_bus_type;
   2676	rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id);
   2677	rpmb->dev.parent = &card->dev;
   2678	rpmb->dev.release = mmc_blk_rpmb_device_release;
   2679	device_initialize(&rpmb->dev);
   2680	dev_set_drvdata(&rpmb->dev, rpmb);
   2681	rpmb->md = md;
   2682
   2683	cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
   2684	rpmb->chrdev.owner = THIS_MODULE;
   2685	ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev);
   2686	if (ret) {
   2687		pr_err("%s: could not add character device\n", rpmb_name);
   2688		goto out_put_device;
   2689	}
   2690
   2691	list_add(&rpmb->node, &md->rpmbs);
   2692
   2693	string_get_size((u64)size, 512, STRING_UNITS_2,
   2694			cap_str, sizeof(cap_str));
   2695
   2696	pr_info("%s: %s %s %s, chardev (%d:%d)\n",
   2697		rpmb_name, mmc_card_id(card), mmc_card_name(card), cap_str,
   2698		MAJOR(mmc_rpmb_devt), rpmb->id);
   2699
   2700	return 0;
   2701
   2702out_put_device:
   2703	put_device(&rpmb->dev);
   2704	return ret;
   2705}
   2706
   2707static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)
   2708
   2709{
   2710	cdev_device_del(&rpmb->chrdev, &rpmb->dev);
   2711	put_device(&rpmb->dev);
   2712}
   2713
   2714/* MMC physical partitions consist of two boot partitions and
   2715 * up to four general purpose partitions.
   2716 * For each partition enabled in EXT_CSD a block device will be allocated
   2717 * to provide access to the partition.
   2718 */
   2719
   2720static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
   2721{
   2722	int idx, ret;
   2723
   2724	if (!mmc_card_mmc(card))
   2725		return 0;
   2726
   2727	for (idx = 0; idx < card->nr_parts; idx++) {
   2728		if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) {
   2729			/*
   2730			 * RPMB partitions do not provide block access; they
   2731			 * are only accessed using ioctl()s. Thus create
   2732			 * special RPMB block devices that do not have a
   2733			 * backing block queue for these.
   2734			 */
   2735			ret = mmc_blk_alloc_rpmb_part(card, md,
   2736				card->part[idx].part_cfg,
   2737				card->part[idx].size >> 9,
   2738				card->part[idx].name);
   2739			if (ret)
   2740				return ret;
   2741		} else if (card->part[idx].size) {
   2742			ret = mmc_blk_alloc_part(card, md,
   2743				card->part[idx].part_cfg,
   2744				card->part[idx].size >> 9,
   2745				card->part[idx].force_ro,
   2746				card->part[idx].name,
   2747				card->part[idx].area_type);
   2748			if (ret)
   2749				return ret;
   2750		}
   2751	}
   2752
   2753	return 0;
   2754}
   2755
   2756static void mmc_blk_remove_req(struct mmc_blk_data *md)
   2757{
   2758	/*
   2759	 * Flush remaining requests and free queues. It is freeing the queue
   2760	 * that stops new requests from being accepted.
   2761	 */
   2762	del_gendisk(md->disk);
   2763	mmc_cleanup_queue(&md->queue);
   2764	mmc_blk_put(md);
   2765}
   2766
   2767static void mmc_blk_remove_parts(struct mmc_card *card,
   2768				 struct mmc_blk_data *md)
   2769{
   2770	struct list_head *pos, *q;
   2771	struct mmc_blk_data *part_md;
   2772	struct mmc_rpmb_data *rpmb;
   2773
   2774	/* Remove RPMB partitions */
   2775	list_for_each_safe(pos, q, &md->rpmbs) {
   2776		rpmb = list_entry(pos, struct mmc_rpmb_data, node);
   2777		list_del(pos);
   2778		mmc_blk_remove_rpmb_part(rpmb);
   2779	}
   2780	/* Remove block partitions */
   2781	list_for_each_safe(pos, q, &md->part) {
   2782		part_md = list_entry(pos, struct mmc_blk_data, part);
   2783		list_del(pos);
   2784		mmc_blk_remove_req(part_md);
   2785	}
   2786}
   2787
   2788#ifdef CONFIG_DEBUG_FS
   2789
   2790static int mmc_dbg_card_status_get(void *data, u64 *val)
   2791{
   2792	struct mmc_card *card = data;
   2793	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
   2794	struct mmc_queue *mq = &md->queue;
   2795	struct request *req;
   2796	int ret;
   2797
   2798	/* Ask the block layer about the card status */
   2799	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
   2800	if (IS_ERR(req))
   2801		return PTR_ERR(req);
   2802	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
   2803	blk_execute_rq(req, false);
   2804	ret = req_to_mmc_queue_req(req)->drv_op_result;
   2805	if (ret >= 0) {
   2806		*val = ret;
   2807		ret = 0;
   2808	}
   2809	blk_mq_free_request(req);
   2810
   2811	return ret;
   2812}
   2813DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
   2814			 NULL, "%08llx\n");
   2815
   2816/* Two hex digits per EXT_CSD byte: 2 * 512, plus 1 for the trailing newline */
   2817#define EXT_CSD_STR_LEN 1025
   2818
   2819static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
   2820{
   2821	struct mmc_card *card = inode->i_private;
   2822	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
   2823	struct mmc_queue *mq = &md->queue;
   2824	struct request *req;
   2825	char *buf;
   2826	ssize_t n = 0;
   2827	u8 *ext_csd;
   2828	int err, i;
   2829
   2830	buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL);
   2831	if (!buf)
   2832		return -ENOMEM;
   2833
   2834	/* Ask the block layer for the EXT CSD */
   2835	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
   2836	if (IS_ERR(req)) {
   2837		err = PTR_ERR(req);
   2838		goto out_free;
   2839	}
   2840	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
   2841	req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
   2842	blk_execute_rq(req, false);
   2843	err = req_to_mmc_queue_req(req)->drv_op_result;
   2844	blk_mq_free_request(req);
   2845	if (err) {
   2846		pr_err("FAILED %d\n", err);
   2847		goto out_free;
   2848	}
   2849
   2850	for (i = 0; i < 512; i++)
   2851		n += sprintf(buf + n, "%02x", ext_csd[i]);
   2852	n += sprintf(buf + n, "\n");
   2853
   2854	if (n != EXT_CSD_STR_LEN) {
   2855		err = -EINVAL;
   2856		kfree(ext_csd);
   2857		goto out_free;
   2858	}
   2859
   2860	filp->private_data = buf;
   2861	kfree(ext_csd);
   2862	return 0;
   2863
   2864out_free:
   2865	kfree(buf);
   2866	return err;
   2867}
   2868
   2869static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf,
   2870				size_t cnt, loff_t *ppos)
   2871{
   2872	char *buf = filp->private_data;
   2873
   2874	return simple_read_from_buffer(ubuf, cnt, ppos,
   2875				       buf, EXT_CSD_STR_LEN);
   2876}
   2877
   2878static int mmc_ext_csd_release(struct inode *inode, struct file *file)
   2879{
   2880	kfree(file->private_data);
   2881	return 0;
   2882}
   2883
   2884static const struct file_operations mmc_dbg_ext_csd_fops = {
   2885	.open		= mmc_ext_csd_open,
   2886	.read		= mmc_ext_csd_read,
   2887	.release	= mmc_ext_csd_release,
   2888	.llseek		= default_llseek,
   2889};
   2890
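/*
 * A small userspace sketch of consuming the ext_csd debugfs file created by
 * mmc_blk_add_debugfs() below: 512 bytes rendered as 1024 hex characters plus
 * a trailing newline, two characters per EXT_CSD byte. The debugfs path is an
 * assumption (it depends on the host index and card device name); this is
 * illustration only, not part of the driver.
 */
#if 0	/* illustrative userspace code */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/mmc0/mmc0:0001/ext_csd";
	char buf[1026] = { 0 };	/* 1024 hex characters + '\n' + NUL */
	unsigned int byte192;	/* EXT_CSD_REV, byte 192 in the eMMC spec */
	FILE *f;

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* Each EXT_CSD byte i is rendered at buf[2 * i], two hex digits wide */
	if (sscanf(&buf[2 * 192], "%2x", &byte192) == 1)
		printf("EXT_CSD_REV = 0x%02x\n", byte192);

	return 0;
}
#endif
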
   2891static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
   2892{
   2893	struct dentry *root;
   2894
   2895	if (!card->debugfs_root)
   2896		return 0;
   2897
   2898	root = card->debugfs_root;
   2899
   2900	if (mmc_card_mmc(card) || mmc_card_sd(card)) {
   2901		md->status_dentry =
   2902			debugfs_create_file_unsafe("status", 0400, root,
   2903						   card,
   2904						   &mmc_dbg_card_status_fops);
   2905		if (!md->status_dentry)
   2906			return -EIO;
   2907	}
   2908
   2909	if (mmc_card_mmc(card)) {
   2910		md->ext_csd_dentry =
   2911			debugfs_create_file("ext_csd", S_IRUSR, root, card,
   2912					    &mmc_dbg_ext_csd_fops);
   2913		if (!md->ext_csd_dentry)
   2914			return -EIO;
   2915	}
   2916
   2917	return 0;
   2918}
   2919
   2920static void mmc_blk_remove_debugfs(struct mmc_card *card,
   2921				   struct mmc_blk_data *md)
   2922{
   2923	if (!card->debugfs_root)
   2924		return;
   2925
   2926	if (!IS_ERR_OR_NULL(md->status_dentry)) {
   2927		debugfs_remove(md->status_dentry);
   2928		md->status_dentry = NULL;
   2929	}
   2930
   2931	if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) {
   2932		debugfs_remove(md->ext_csd_dentry);
   2933		md->ext_csd_dentry = NULL;
   2934	}
   2935}
   2936
   2937#else
   2938
   2939static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
   2940{
   2941	return 0;
   2942}
   2943
   2944static void mmc_blk_remove_debugfs(struct mmc_card *card,
   2945				   struct mmc_blk_data *md)
   2946{
   2947}
   2948
   2949#endif /* CONFIG_DEBUG_FS */
   2950
   2951static int mmc_blk_probe(struct mmc_card *card)
   2952{
   2953	struct mmc_blk_data *md;
   2954	int ret = 0;
   2955
   2956	/*
   2957	 * Check that the card supports the command class(es) we need.
   2958	 */
   2959	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
   2960		return -ENODEV;
   2961
   2962	mmc_fixup_device(card, mmc_blk_fixups);
   2963
   2964	card->complete_wq = alloc_workqueue("mmc_complete",
   2965					WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
   2966	if (!card->complete_wq) {
   2967		pr_err("Failed to create mmc completion workqueue\n");
   2968		return -ENOMEM;
   2969	}
   2970
   2971	md = mmc_blk_alloc(card);
   2972	if (IS_ERR(md)) {
   2973		ret = PTR_ERR(md);
   2974		goto out_free;
   2975	}
   2976
   2977	ret = mmc_blk_alloc_parts(card, md);
   2978	if (ret)
   2979		goto out;
   2980
   2981	/* Add two debugfs entries */
   2982	mmc_blk_add_debugfs(card, md);
   2983
   2984	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
   2985	pm_runtime_use_autosuspend(&card->dev);
   2986
   2987	/*
   2988	 * Don't enable runtime PM for SD-combo cards here. Leave that
   2989	 * decision to be taken during the SDIO init sequence instead.
   2990	 */
   2991	if (card->type != MMC_TYPE_SD_COMBO) {
   2992		pm_runtime_set_active(&card->dev);
   2993		pm_runtime_enable(&card->dev);
   2994	}
   2995
   2996	return 0;
   2997
   2998out:
   2999	mmc_blk_remove_parts(card, md);
   3000	mmc_blk_remove_req(md);
   3001out_free:
   3002	destroy_workqueue(card->complete_wq);
   3003	return ret;
   3004}
   3005
   3006static void mmc_blk_remove(struct mmc_card *card)
   3007{
   3008	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
   3009
   3010	mmc_blk_remove_debugfs(card, md);
   3011	mmc_blk_remove_parts(card, md);
   3012	pm_runtime_get_sync(&card->dev);
   3013	if (md->part_curr != md->part_type) {
   3014		mmc_claim_host(card->host);
   3015		mmc_blk_part_switch(card, md->part_type);
   3016		mmc_release_host(card->host);
   3017	}
   3018	if (card->type != MMC_TYPE_SD_COMBO)
   3019		pm_runtime_disable(&card->dev);
   3020	pm_runtime_put_noidle(&card->dev);
   3021	mmc_blk_remove_req(md);
   3022	dev_set_drvdata(&card->dev, NULL);
   3023	destroy_workqueue(card->complete_wq);
   3024}
   3025
   3026static int _mmc_blk_suspend(struct mmc_card *card)
   3027{
   3028	struct mmc_blk_data *part_md;
   3029	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
   3030
   3031	if (md) {
   3032		mmc_queue_suspend(&md->queue);
   3033		list_for_each_entry(part_md, &md->part, part) {
   3034			mmc_queue_suspend(&part_md->queue);
   3035		}
   3036	}
   3037	return 0;
   3038}
   3039
   3040static void mmc_blk_shutdown(struct mmc_card *card)
   3041{
   3042	_mmc_blk_suspend(card);
   3043}
   3044
   3045#ifdef CONFIG_PM_SLEEP
   3046static int mmc_blk_suspend(struct device *dev)
   3047{
   3048	struct mmc_card *card = mmc_dev_to_card(dev);
   3049
   3050	return _mmc_blk_suspend(card);
   3051}
   3052
   3053static int mmc_blk_resume(struct device *dev)
   3054{
   3055	struct mmc_blk_data *part_md;
   3056	struct mmc_blk_data *md = dev_get_drvdata(dev);
   3057
   3058	if (md) {
   3059		/*
   3060		 * Resume involves the card going into idle state,
   3061		 * so current partition is always the main one.
   3062		 */
   3063		md->part_curr = md->part_type;
   3064		mmc_queue_resume(&md->queue);
   3065		list_for_each_entry(part_md, &md->part, part) {
   3066			mmc_queue_resume(&part_md->queue);
   3067		}
   3068	}
   3069	return 0;
   3070}
   3071#endif
   3072
   3073static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
   3074
   3075static struct mmc_driver mmc_driver = {
   3076	.drv		= {
   3077		.name	= "mmcblk",
   3078		.pm	= &mmc_blk_pm_ops,
   3079	},
   3080	.probe		= mmc_blk_probe,
   3081	.remove		= mmc_blk_remove,
   3082	.shutdown	= mmc_blk_shutdown,
   3083};
   3084
   3085static int __init mmc_blk_init(void)
   3086{
   3087	int res;
   3088
   3089	res  = bus_register(&mmc_rpmb_bus_type);
   3090	if (res < 0) {
   3091		pr_err("mmcblk: could not register RPMB bus type\n");
   3092		return res;
   3093	}
   3094	res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb");
   3095	if (res < 0) {
   3096		pr_err("mmcblk: failed to allocate rpmb chrdev region\n");
   3097		goto out_bus_unreg;
   3098	}
   3099
   3100	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
   3101		pr_info("mmcblk: using %d minors per device\n", perdev_minors);
   3102
   3103	max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
   3104
   3105	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
   3106	if (res)
   3107		goto out_chrdev_unreg;
   3108
   3109	res = mmc_register_driver(&mmc_driver);
   3110	if (res)
   3111		goto out_blkdev_unreg;
   3112
   3113	return 0;
   3114
   3115out_blkdev_unreg:
   3116	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
   3117out_chrdev_unreg:
   3118	unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
   3119out_bus_unreg:
   3120	bus_unregister(&mmc_rpmb_bus_type);
   3121	return res;
   3122}
   3123
   3124static void __exit mmc_blk_exit(void)
   3125{
   3126	mmc_unregister_driver(&mmc_driver);
   3127	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
   3128	unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
   3129	bus_unregister(&mmc_rpmb_bus_type);
   3130}
   3131
   3132module_init(mmc_blk_init);
   3133module_exit(mmc_blk_exit);
   3134
   3135MODULE_LICENSE("GPL");
   3136MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
   3137