cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mmc_test.c (74833B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright 2007-2008 Pierre Ossman
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"

#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

#define TEST_ALIGN_END		8

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)

/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 * @sg_areq: scatterlist for non-blocking request
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
	struct scatterlist *sg_areq;
};

/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: number of sector groups checked
 * @sectors: number of sectors per group
 * @ts: duration of the transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec64 ts;
	unsigned int rate;
	unsigned int iops;
};

/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};

/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};

/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};

enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};

struct mmc_test_multiple_rw {
	unsigned int *sg_len;
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};
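
/*
 * Illustrative sketch (not part of the driver): how a multiple-read/write
 * test description might be filled in.  The block sizes and the identifier
 * 'example_bs' are hypothetical and only show how the fields relate:
 *
 *	static unsigned int example_bs[] = { 512, 4096, 65536 };
 *
 *	static const struct mmc_test_multiple_rw example_rw = {
 *		.bs = example_bs,
 *		.len = ARRAY_SIZE(example_bs),	// entries in @bs
 *		.size = TEST_AREA_MAX_SIZE,	// total bytes per measurement
 *		.do_write = true,		// write test, not read
 *		.do_nonblock_req = false,	// use blocking requests
 *		.prepare = MMC_TEST_PREP_ERASE,	// erase the area first
 *	};
 */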

/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}

static bool mmc_test_card_cmd23(struct mmc_card *card)
{
	return mmc_card_mmc(card) ||
	       (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
}

static void mmc_test_prepare_sbc(struct mmc_test_card *test,
				 struct mmc_request *mrq, unsigned int blocks)
{
	struct mmc_card *card = test->card;

	if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
	    !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
	    (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
		mrq->sbc = NULL;
		return;
	}

	mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
	mrq->sbc->arg = blocks;
	mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
}
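
/*
 * For reference (a sketch, not driver output): with CMD23 the multi-block
 * sequence is pre-sized and needs no CMD12 on the success path, while
 * without it the transfer is open-ended and must be stopped explicitly:
 *
 *	with sbc:	CMD23 (count) -> CMD25 (write) -> data
 *	without sbc:	CMD25 (write) -> data -> CMD12 (stop)
 */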

/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
		return;

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_test_prepare_sbc(test, mrq, blocks);

	mmc_set_data_timeout(mrq->data, test->card);
}

static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}

/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd = {};

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				pr_info("%s: Warning: Host did not wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}

/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return mmc_test_wait_busy(test);
}

static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}

/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}

/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len, int min_sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;
	unsigned long sz = size;

	sg_init_table(sglist, max_segs);
	if (min_sg_len > max_segs)
		min_sg_len = max_segs;

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (min_sg_len && (size / min_sg_len < len))
				len = ALIGN(size / min_sg_len, 512);
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}
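
/*
 * Worked example (hypothetical numbers): with two 4 KiB allocations
 * (PAGE_SIZE = 4 KiB, order 0), size = 8 KiB and max_seg_sz = 4 KiB, each
 * allocation becomes one scatterlist entry and *sg_len ends up as 2.  If
 * the allocations cover less than the requested size, passing repeat maps
 * the same memory again until the size is reached.
 */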

/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts)
{
	uint64_t ns;

	ns = timespec64_to_ns(ts);
	bytes *= 1000000000;

	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}
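
/*
 * Worked example: 4 MiB (4194304 bytes) transferred in 0.1 s gives
 * ns = 100000000, bytes * 1e9 = 4194304000000000, and after the division
 * the rate is 41943040 bytes/s, i.e. about 41.9 MB/s (40 MiB/s).  The
 * halving loop only kicks in once ns exceeds UINT_MAX (transfers longer
 * than about 4.3 s), trading a little precision for a 32-bit divisor.
 */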

/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec64 ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	if (!test->gr)
		return;

	tr = kmalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}

/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec64 *ts1, struct timespec64 *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec64 ts;

	ts = timespec64_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
		mmc_hostname(test->card->host), sectors, sectors >> 1,
		(sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,
		(u32)ts.tv_nsec, rate / 1000, rate / 1024,
		iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}

/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec64 *ts1,
				    struct timespec64 *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec64 ts;

	ts = timespec64_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took %llu.%09u seconds (%u kB/s, %u KiB/s, %u.%02u IOPS, sg_len %d)\n",
		mmc_hostname(test->card->host), count, sectors, count,
		sectors >> 1, (sectors & 1 ? ".5" : ""),
		(u64)ts.tv_sec, (u32)ts.tv_nsec,
		rate / 1000, rate / 1024, iops / 100, iops % 100,
		test->area.sg_len);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}

/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}

/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write, int val)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, val, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1, 0xDF);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1, 0);
}

/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return;

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
				 struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	if (mrq->sbc && mrq->sbc->error)
		ret = mrq->sbc->error;
	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
	struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

struct mmc_test_req {
	struct mmc_request mrq;
	struct mmc_command sbc;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_command status;
	struct mmc_data data;
};

/*
 * Reset a mmc_test_req so that it can be reused for another transfer.
 */
static void mmc_test_req_reset(struct mmc_test_req *rq)
{
	memset(rq, 0, sizeof(struct mmc_test_req));

	rq->mrq.cmd = &rq->cmd;
	rq->mrq.data = &rq->data;
	rq->mrq.stop = &rq->stop;
}

static struct mmc_test_req *mmc_test_req_alloc(void)
{
	struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL);

	if (rq)
		mmc_test_req_reset(rq);

	return rq;
}

static void mmc_test_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static int mmc_test_start_areq(struct mmc_test_card *test,
			       struct mmc_request *mrq,
			       struct mmc_request *prev_mrq)
{
	struct mmc_host *host = test->card->host;
	int err = 0;

	if (mrq) {
		init_completion(&mrq->completion);
		mrq->done = mmc_test_wait_done;
		mmc_pre_req(host, mrq);
	}

	if (prev_mrq) {
		wait_for_completion(&prev_mrq->completion);
		err = mmc_test_wait_busy(test);
		if (!err)
			err = mmc_test_check_result(test, prev_mrq);
	}

	if (!err && mrq) {
		err = mmc_start_request(host, mrq);
		if (err)
			mmc_retune_release(host);
	}

	if (prev_mrq)
		mmc_post_req(host, prev_mrq, 0);

	if (err && mrq)
		mmc_post_req(host, mrq, err);

	return err;
}
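
/*
 * The call pattern above pipelines requests: each call prepares the next
 * request (mmc_pre_req) before waiting for the previous one to complete,
 * so preparation of one transfer overlaps the previous transfer, e.g.:
 *
 *	mmc_test_start_areq(test, mrq1, NULL);	// start first request
 *	mmc_test_start_areq(test, mrq2, mrq1);	// prep+start 2, reap 1
 *	mmc_test_start_areq(test, NULL, mrq2);	// reap last request
 */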

/*
 * Tests nonblock transfer with certain parameters
 */
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
				      unsigned int dev_addr, int write,
				      int count)
{
	struct mmc_test_req *rq1, *rq2;
	struct mmc_request *mrq, *prev_mrq;
	int i;
	int ret = RESULT_OK;
	struct mmc_test_area *t = &test->area;
	struct scatterlist *sg = t->sg;
	struct scatterlist *sg_areq = t->sg_areq;

	rq1 = mmc_test_req_alloc();
	rq2 = mmc_test_req_alloc();
	if (!rq1 || !rq2) {
		ret = RESULT_FAIL;
		goto err;
	}

	mrq = &rq1->mrq;
	prev_mrq = NULL;

	for (i = 0; i < count; i++) {
		mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
		mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr,
				     t->blocks, 512, write);
		ret = mmc_test_start_areq(test, mrq, prev_mrq);
		if (ret)
			goto err;

		if (!prev_mrq)
			prev_mrq = &rq2->mrq;

		swap(mrq, prev_mrq);
		swap(sg, sg_areq);
		dev_addr += t->blocks;
	}

	ret = mmc_test_start_areq(test, NULL, prev_mrq);
err:
	kfree(rq1);
	kfree(rq2);
	return ret;
}

/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
		blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}

/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}

/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}

/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};

static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_verify_write(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

static int mmc_test_multi_read(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 1);
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 0);
}

#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	pr_info("%s: Highmem not configured - test skipped\n",
		mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */

/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter, int min_sg_len, bool nonblock)
{
	struct mmc_test_area *t = &test->area;
	int err;
	unsigned int sg_len = 0;

	t->blocks = sz >> 9;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len, min_sg_len);
	}

	if (err || !nonblock)
		goto err;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq,
						  t->max_segs, t->max_seg_sz,
						  &sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs,
				      t->max_seg_sz, &sg_len, min_sg_len);
	}
	if (!err && sg_len != t->sg_len)
		err = -EINVAL;

err:
	if (err)
		pr_info("%s: Failed to map sg list\n",
			mmc_hostname(test->card->host));
	return err;
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}

/*
 * Map and transfer bytes for multiple transfers.
 */
static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
				unsigned int dev_addr, int write,
				int max_scatter, int timed, int count,
				bool nonblock, int min_sg_len)
{
	struct timespec64 ts1, ts2;
	int ret = 0;
	int i;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		struct mmc_test_area *t = &test->area;
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock);
	if (ret)
		return ret;

	if (timed)
		ktime_get_ts64(&ts1);
	if (nonblock)
		ret = mmc_test_nonblock_transfer(test, dev_addr, write, count);
	else
		for (i = 0; i < count && ret == 0; i++) {
			ret = mmc_test_area_transfer(test, dev_addr, write);
			dev_addr += sz >> 9;
		}

	if (ret)
		return ret;

	if (timed)
		ktime_get_ts64(&ts2);

	if (timed)
		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);

	return 0;
}

static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
				    timed, 1, false, 0);
}

/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
			 MMC_ERASE_ARG);
}

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	kfree(t->sg_areq);
	mmc_test_free_mem(t->mem);

	return 0;
}

/*
 * Initialize an area for testing large transfers.  The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization).  Optionally, the area is erased
 * (if the card supports it) which may improve write performance.  Optionally,
 * the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;
	t->max_seg_sz -= t->max_seg_sz % 512;

	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once.  Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg_areq),
				   GFP_KERNEL);
	if (!t->sg_areq) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}
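
/*
 * Sizing example (hypothetical card): with pref_erase = 8192 sectors
 * (4 MiB), the loops above leave max_sz at exactly 4 MiB; with
 * pref_erase = 2048 sectors (1 MiB) they grow max_sz to 4 MiB in 1 MiB
 * steps.  The test area is then aligned down to a max_sz boundary at
 * half the card's capacity.
 */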

/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}

/*
 * Test best-case performance.  Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list.  This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
				max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}

/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec64 ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ktime_get_ts64(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		ktime_get_ts64(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	ktime_get_ts64(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}

static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	ktime_get_ts64(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}

static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	ktime_get_ts64(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}

/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		ktime_get_ts64(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		ktime_get_ts64(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

static unsigned int rnd_next = 1;

static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}
   1910
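       /*
        * Time random transfers of size 'sz' for 10 seconds.  Addresses fall in
        * the second quarter of the card: a pseudo-randomly chosen preferred
        * erase unit (adjusted to avoid an immediate repeat) plus a
        * pseudo-random 'sz'-aligned offset.
        */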
   1911static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
   1912			     unsigned long sz)
   1913{
   1914	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
   1915	unsigned int ssz;
   1916	struct timespec64 ts1, ts2, ts;
   1917	int ret;
   1918
   1919	ssz = sz >> 9;
   1920
   1921	rnd_addr = mmc_test_capacity(test->card) / 4;
   1922	range1 = rnd_addr / test->card->pref_erase;
   1923	range2 = range1 / ssz;
   1924
   1925	ktime_get_ts64(&ts1);
   1926	for (cnt = 0; cnt < UINT_MAX; cnt++) {
   1927		ktime_get_ts64(&ts2);
   1928		ts = timespec64_sub(ts2, ts1);
   1929		if (ts.tv_sec >= 10)
   1930			break;
   1931		ea = mmc_test_rnd_num(range1);
   1932		if (ea == last_ea)
   1933			ea -= 1;
   1934		last_ea = ea;
   1935		dev_addr = rnd_addr + test->card->pref_erase * ea +
   1936			   ssz * mmc_test_rnd_num(range2);
   1937		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
   1938		if (ret)
   1939			return ret;
   1940	}
   1941	if (print)
   1942		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
   1943	return 0;
   1944}
   1945
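       /*
        * Run the random I/O test for each power-of-two transfer size from 512
        * bytes up to the maximum transfer size.
        */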
   1946static int mmc_test_random_perf(struct mmc_test_card *test, int write)
   1947{
   1948	struct mmc_test_area *t = &test->area;
   1949	unsigned int next;
   1950	unsigned long sz;
   1951	int ret;
   1952
   1953	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
   1954		/*
   1955		 * When writing, try to get more consistent results by running
   1956		 * the test twice with exactly the same I/O but outputting the
   1957		 * results only for the 2nd run.
   1958		 */
   1959		if (write) {
   1960			next = rnd_next;
   1961			ret = mmc_test_rnd_perf(test, write, 0, sz);
   1962			if (ret)
   1963				return ret;
   1964			rnd_next = next;
   1965		}
   1966		ret = mmc_test_rnd_perf(test, write, 1, sz);
   1967		if (ret)
   1968			return ret;
   1969	}
   1970	sz = t->max_tfr;
   1971	if (write) {
   1972		next = rnd_next;
   1973		ret = mmc_test_rnd_perf(test, write, 0, sz);
   1974		if (ret)
   1975			return ret;
   1976		rnd_next = next;
   1977	}
   1978	return mmc_test_rnd_perf(test, write, 1, sz);
   1979}
   1980
   1981/*
   1982 * Random read performance by transfer size.
   1983 */
   1984static int mmc_test_random_read_perf(struct mmc_test_card *test)
   1985{
   1986	return mmc_test_random_perf(test, 0);
   1987}
   1988
   1989/*
   1990 * Random write performance by transfer size.
   1991 */
   1992static int mmc_test_random_write_perf(struct mmc_test_card *test)
   1993{
   1994	return mmc_test_random_perf(test, 1);
   1995}
   1996
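       /*
        * Time a single sequential pass of 'tot_sz' bytes done in maximum-sized
        * transfers, starting at one quarter of the card capacity.
        */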
   1997static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
   1998			     unsigned int tot_sz, int max_scatter)
   1999{
   2000	struct mmc_test_area *t = &test->area;
   2001	unsigned int dev_addr, i, cnt, sz, ssz;
   2002	struct timespec64 ts1, ts2;
   2003	int ret;
   2004
   2005	sz = t->max_tfr;
   2006
   2007	/*
   2008	 * In the case of a maximally scattered transfer, the maximum transfer
   2009	 * size is further limited by using PAGE_SIZE segments.
   2010	 */
   2011	if (max_scatter) {
   2012		unsigned long max_tfr;
   2013
   2014		if (t->max_seg_sz >= PAGE_SIZE)
   2015			max_tfr = t->max_segs * PAGE_SIZE;
   2016		else
   2017			max_tfr = t->max_segs * t->max_seg_sz;
   2018		if (sz > max_tfr)
   2019			sz = max_tfr;
   2020	}
   2021
   2022	ssz = sz >> 9;
   2023	dev_addr = mmc_test_capacity(test->card) / 4;
   2024	if (tot_sz > dev_addr << 9)
   2025		tot_sz = dev_addr << 9;
   2026	cnt = tot_sz / sz;
   2027	dev_addr &= 0xffff0000; /* Round down to a 32MiB (2^16 sector) boundary */
   2028
   2029	ktime_get_ts64(&ts1);
   2030	for (i = 0; i < cnt; i++) {
   2031		ret = mmc_test_area_io(test, sz, dev_addr, write,
   2032				       max_scatter, 0);
   2033		if (ret)
   2034			return ret;
   2035		dev_addr += ssz;
   2036	}
   2037	ktime_get_ts64(&ts2);
   2038
   2039	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
   2040
   2041	return 0;
   2042}
   2043
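       /*
        * Time maximally-scattered sequential transfers: ten passes of 10 MiB,
        * five of 100 MiB and three of 1000 MiB.
        */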
   2044static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
   2045{
   2046	int ret, i;
   2047
   2048	for (i = 0; i < 10; i++) {
   2049		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
   2050		if (ret)
   2051			return ret;
   2052	}
   2053	for (i = 0; i < 5; i++) {
   2054		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
   2055		if (ret)
   2056			return ret;
   2057	}
   2058	for (i = 0; i < 3; i++) {
   2059		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
   2060		if (ret)
   2061			return ret;
   2062	}
   2063
   2064	return ret;
   2065}
   2066
   2067/*
   2068 * Large sequential read performance.
   2069 */
   2070static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
   2071{
   2072	return mmc_test_large_seq_perf(test, 0);
   2073}
   2074
   2075/*
   2076 * Large sequential write performance.
   2077 */
   2078static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
   2079{
   2080	return mmc_test_large_seq_perf(test, 1);
   2081}
   2082
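       /*
        * Do one multiple-transfer run: clamp 'size' and 'reqsize' to the card
        * and driver limits, align the start address, optionally erase the
        * region, then issue size/reqsize consecutive requests of 'reqsize'
        * bytes each.
        */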
   2083static int mmc_test_rw_multiple(struct mmc_test_card *test,
   2084				struct mmc_test_multiple_rw *tdata,
   2085				unsigned int reqsize, unsigned int size,
   2086				int min_sg_len)
   2087{
   2088	unsigned int dev_addr;
   2089	struct mmc_test_area *t = &test->area;
   2090	int ret = 0;
   2091
   2092	/* Set up test area */
   2093	if (size > mmc_test_capacity(test->card) / 2 * 512)
   2094		size = mmc_test_capacity(test->card) / 2 * 512;
   2095	if (reqsize > t->max_tfr)
   2096		reqsize = t->max_tfr;
   2097	dev_addr = mmc_test_capacity(test->card) / 4;
   2098	if (dev_addr & 0xffff0000)
   2099		dev_addr &= 0xffff0000; /* Round to 32MiB boundary */
   2100	else
   2101		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
   2102	if (!dev_addr)
   2103		goto err;
   2104
   2105	if (reqsize > size)
   2106		return 0;
   2107
   2108	/* Prepare test area */
   2109	if (mmc_can_erase(test->card) &&
   2110	    tdata->prepare & MMC_TEST_PREP_ERASE) {
   2111		ret = mmc_erase(test->card, dev_addr,
   2112				size / 512, test->card->erase_arg);
   2113		if (ret)
   2114			ret = mmc_erase(test->card, dev_addr,
   2115					size / 512, MMC_ERASE_ARG);
   2116		if (ret)
   2117			goto err;
   2118	}
   2119
   2120	/* Run test */
   2121	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
   2122				   tdata->do_write, 0, 1, size / reqsize,
   2123				   tdata->do_nonblock_req, min_sg_len);
   2124	if (ret)
   2125		goto err;
   2126
   2127	return ret;
   2128 err:
   2129	pr_info("[%s] error\n", __func__);
   2130	return ret;
   2131}
   2132
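       /*
        * Run mmc_test_rw_multiple() once for every request size in rw->bs[].
        * Configurations where only one of the host's pre_req/post_req ops is
        * implemented are rejected for non-blocking requests.
        */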
   2133static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
   2134				     struct mmc_test_multiple_rw *rw)
   2135{
   2136	int ret = 0;
   2137	int i;
   2138	void *pre_req = test->card->host->ops->pre_req;
   2139	void *post_req = test->card->host->ops->post_req;
   2140
   2141	if (rw->do_nonblock_req &&
   2142	    ((!pre_req && post_req) || (pre_req && !post_req))) {
   2143		pr_info("error: only one of pre/post is defined\n");
   2144		return -EINVAL;
   2145	}
   2146
   2147	for (i = 0; i < rw->len && ret == 0; i++) {
   2148		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
   2149		if (ret)
   2150			break;
   2151	}
   2152	return ret;
   2153}
   2154
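       /*
        * Run mmc_test_rw_multiple() with 512 KiB requests for every minimum
        * sg length in rw->sg_len[].
        */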
   2155static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
   2156				       struct mmc_test_multiple_rw *rw)
   2157{
   2158	int ret = 0;
   2159	int i;
   2160
   2161	for (i = 0; i < rw->len && ret == 0; i++) {
   2162		ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
   2163					   rw->sg_len[i]);
   2164		if (ret)
   2165			break;
   2166	}
   2167	return ret;
   2168}
   2169
   2170/*
   2171 * Multiple blocking write 4k to 4 MB chunks
   2172 */
   2173static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
   2174{
   2175	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
   2176			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
   2177	struct mmc_test_multiple_rw test_data = {
   2178		.bs = bs,
   2179		.size = TEST_AREA_MAX_SIZE,
   2180		.len = ARRAY_SIZE(bs),
   2181		.do_write = true,
   2182		.do_nonblock_req = false,
   2183		.prepare = MMC_TEST_PREP_ERASE,
   2184	};
   2185
   2186	return mmc_test_rw_multiple_size(test, &test_data);
   2187}
   2188
   2189/*
   2190 * Multiple non-blocking write 4k to 4 MB chunks
   2191 */
   2192static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
   2193{
   2194	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
   2195			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
   2196	struct mmc_test_multiple_rw test_data = {
   2197		.bs = bs,
   2198		.size = TEST_AREA_MAX_SIZE,
   2199		.len = ARRAY_SIZE(bs),
   2200		.do_write = true,
   2201		.do_nonblock_req = true,
   2202		.prepare = MMC_TEST_PREP_ERASE,
   2203	};
   2204
   2205	return mmc_test_rw_multiple_size(test, &test_data);
   2206}
   2207
   2208/*
   2209 * Multiple blocking read 4k to 4 MB chunks
   2210 */
   2211static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
   2212{
   2213	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
   2214			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
   2215	struct mmc_test_multiple_rw test_data = {
   2216		.bs = bs,
   2217		.size = TEST_AREA_MAX_SIZE,
   2218		.len = ARRAY_SIZE(bs),
   2219		.do_write = false,
   2220		.do_nonblock_req = false,
   2221		.prepare = MMC_TEST_PREP_NONE,
   2222	};
   2223
   2224	return mmc_test_rw_multiple_size(test, &test_data);
   2225}
   2226
   2227/*
   2228 * Multiple non-blocking read 4k to 4 MB chunks
   2229 */
   2230static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
   2231{
   2232	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
   2233			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
   2234	struct mmc_test_multiple_rw test_data = {
   2235		.bs = bs,
   2236		.size = TEST_AREA_MAX_SIZE,
   2237		.len = ARRAY_SIZE(bs),
   2238		.do_write = false,
   2239		.do_nonblock_req = true,
   2240		.prepare = MMC_TEST_PREP_NONE,
   2241	};
   2242
   2243	return mmc_test_rw_multiple_size(test, &test_data);
   2244}
   2245
   2246/*
   2247 * Multiple blocking write 1 to 512 sg elements
   2248 */
   2249static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
   2250{
   2251	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
   2252				 1 << 7, 1 << 8, 1 << 9};
   2253	struct mmc_test_multiple_rw test_data = {
   2254		.sg_len = sg_len,
   2255		.size = TEST_AREA_MAX_SIZE,
   2256		.len = ARRAY_SIZE(sg_len),
   2257		.do_write = true,
   2258		.do_nonblock_req = false,
   2259		.prepare = MMC_TEST_PREP_ERASE,
   2260	};
   2261
   2262	return mmc_test_rw_multiple_sg_len(test, &test_data);
   2263}
   2264
   2265/*
   2266 * Multiple non-blocking write 1 to 512 sg elements
   2267 */
   2268static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
   2269{
   2270	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
   2271				 1 << 7, 1 << 8, 1 << 9};
   2272	struct mmc_test_multiple_rw test_data = {
   2273		.sg_len = sg_len,
   2274		.size = TEST_AREA_MAX_SIZE,
   2275		.len = ARRAY_SIZE(sg_len),
   2276		.do_write = true,
   2277		.do_nonblock_req = true,
   2278		.prepare = MMC_TEST_PREP_ERASE,
   2279	};
   2280
   2281	return mmc_test_rw_multiple_sg_len(test, &test_data);
   2282}
   2283
   2284/*
   2285 * Multiple blocking read 1 to 512 sg elements
   2286 */
   2287static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
   2288{
   2289	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
   2290				 1 << 7, 1 << 8, 1 << 9};
   2291	struct mmc_test_multiple_rw test_data = {
   2292		.sg_len = sg_len,
   2293		.size = TEST_AREA_MAX_SIZE,
   2294		.len = ARRAY_SIZE(sg_len),
   2295		.do_write = false,
   2296		.do_nonblock_req = false,
   2297		.prepare = MMC_TEST_PREP_NONE,
   2298	};
   2299
   2300	return mmc_test_rw_multiple_sg_len(test, &test_data);
   2301}
   2302
   2303/*
   2304 * Multiple non-blocking read 1 to 512 sg elements
   2305 */
   2306static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
   2307{
   2308	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
   2309				 1 << 7, 1 << 8, 1 << 9};
   2310	struct mmc_test_multiple_rw test_data = {
   2311		.sg_len = sg_len,
   2312		.size = TEST_AREA_MAX_SIZE,
   2313		.len = ARRAY_SIZE(sg_len),
   2314		.do_write = false,
   2315		.do_nonblock_req = true,
   2316		.prepare = MMC_TEST_PREP_NONE,
   2317	};
   2318
   2319	return mmc_test_rw_multiple_sg_len(test, &test_data);
   2320}
   2321
   2322/*
   2323 * eMMC hardware reset.
   2324 */
   2325static int mmc_test_reset(struct mmc_test_card *test)
   2326{
   2327	struct mmc_card *card = test->card;
   2328	int err;
   2329
   2330	err = mmc_hw_reset(card);
   2331	if (!err) {
   2332		/*
   2333		 * Reset will re-enable the card's command queue, but tests
   2334		 * expect it to be disabled.
   2335		 */
   2336		if (card->ext_csd.cmdq_en)
   2337			mmc_cmdq_disable(card);
   2338		return RESULT_OK;
   2339	} else if (err == -EOPNOTSUPP) {
   2340		return RESULT_UNSUP_HOST;
   2341	}
   2342
   2343	return RESULT_FAIL;
   2344}
   2345
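       /*
        * Issue CMD13 (SEND_STATUS) so the card's state can be polled while a
        * data transfer is in progress.
        */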
   2346static int mmc_test_send_status(struct mmc_test_card *test,
   2347				struct mmc_command *cmd)
   2348{
   2349	memset(cmd, 0, sizeof(*cmd));
   2350
   2351	cmd->opcode = MMC_SEND_STATUS;
   2352	if (!mmc_host_is_spi(test->card->host))
   2353		cmd->arg = test->card->rca << 16;
   2354	cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
   2355
   2356	return mmc_wait_for_cmd(test->card->host, cmd, 0);
   2357}
   2358
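       /*
        * Start a data request with cap_cmd_during_tfr set and, while it is in
        * flight, send CMD13 to the card (repeatedly until the card returns to
        * the Tran state if 'repeat_cmd' is set), then complete the request and
        * check the result.
        */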
   2359static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
   2360				     unsigned int dev_addr, int use_sbc,
   2361				     int repeat_cmd, int write, int use_areq)
   2362{
   2363	struct mmc_test_req *rq = mmc_test_req_alloc();
   2364	struct mmc_host *host = test->card->host;
   2365	struct mmc_test_area *t = &test->area;
   2366	struct mmc_request *mrq;
   2367	unsigned long timeout;
   2368	bool expired = false;
   2369	int ret = 0, cmd_ret;
   2370	u32 status = 0;
   2371	int count = 0;
   2372
   2373	if (!rq)
   2374		return -ENOMEM;
   2375
   2376	mrq = &rq->mrq;
   2377	if (use_sbc)
   2378		mrq->sbc = &rq->sbc;
   2379	mrq->cap_cmd_during_tfr = true;
   2380
   2381	mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
   2382			     512, write);
   2383
   2384	if (use_sbc && t->blocks > 1 && !mrq->sbc) {
   2385		ret = mmc_host_cmd23(host) ?
   2386		       RESULT_UNSUP_CARD :
   2387		       RESULT_UNSUP_HOST;
   2388		goto out_free;
   2389	}
   2390
   2391	/* Start ongoing data request */
   2392	if (use_areq) {
   2393		ret = mmc_test_start_areq(test, mrq, NULL);
   2394		if (ret)
   2395			goto out_free;
   2396	} else {
   2397		mmc_wait_for_req(host, mrq);
   2398	}
   2399
   2400	timeout = jiffies + msecs_to_jiffies(3000);
   2401	do {
   2402		count += 1;
   2403
   2404		/* Send status command while data transfer in progress */
   2405		cmd_ret = mmc_test_send_status(test, &rq->status);
   2406		if (cmd_ret)
   2407			break;
   2408
   2409		status = rq->status.resp[0];
   2410		if (status & R1_ERROR) {
   2411			cmd_ret = -EIO;
   2412			break;
   2413		}
   2414
   2415		if (mmc_is_req_done(host, mrq))
   2416			break;
   2417
   2418		expired = time_after(jiffies, timeout);
   2419		if (expired) {
   2420			pr_info("%s: timeout waiting for Tran state status %#x\n",
   2421				mmc_hostname(host), status);
   2422			cmd_ret = -ETIMEDOUT;
   2423			break;
   2424		}
   2425	} while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
   2426
   2427	/* Wait for data request to complete */
   2428	if (use_areq) {
   2429		ret = mmc_test_start_areq(test, NULL, mrq);
   2430	} else {
   2431		mmc_wait_for_req_done(test->card->host, mrq);
   2432	}
   2433
   2434	/*
   2435	 * For cap_cmd_during_tfr request, upper layer must send stop if
   2436	 * required.
   2437	 */
   2438	if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
   2439		if (ret)
   2440			mmc_wait_for_cmd(host, mrq->data->stop, 0);
   2441		else
   2442			ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
   2443	}
   2444
   2445	if (ret)
   2446		goto out_free;
   2447
   2448	if (cmd_ret) {
   2449		pr_info("%s: Send Status failed: status %#x, error %d\n",
   2450			mmc_hostname(test->card->host), status, cmd_ret);
   2451	}
   2452
   2453	ret = mmc_test_check_result(test, mrq);
   2454	if (ret)
   2455		goto out_free;
   2456
   2457	ret = mmc_test_wait_busy(test);
   2458	if (ret)
   2459		goto out_free;
   2460
   2461	if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
   2462		pr_info("%s: %d commands completed during transfer of %u blocks\n",
   2463			mmc_hostname(test->card->host), count, t->blocks);
   2464
   2465	if (cmd_ret)
   2466		ret = cmd_ret;
   2467out_free:
   2468	kfree(rq);
   2469
   2470	return ret;
   2471}
   2472
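       /*
        * One commands-during-transfer case at size 'sz': map the test area,
        * then run the ongoing-transfer test once without and once with
        * repeated status polling.
        */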
   2473static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
   2474				      unsigned long sz, int use_sbc, int write,
   2475				      int use_areq)
   2476{
   2477	struct mmc_test_area *t = &test->area;
   2478	int ret;
   2479
   2480	if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
   2481		return RESULT_UNSUP_HOST;
   2482
   2483	ret = mmc_test_area_map(test, sz, 0, 0, use_areq);
   2484	if (ret)
   2485		return ret;
   2486
   2487	ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
   2488					use_areq);
   2489	if (ret)
   2490		return ret;
   2491
   2492	return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
   2493					 use_areq);
   2494}
   2495
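       /*
        * Step through every transfer size from 512 bytes up to the maximum in
        * 512-byte increments.
        */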
   2496static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
   2497				    int write, int use_areq)
   2498{
   2499	struct mmc_test_area *t = &test->area;
   2500	unsigned long sz;
   2501	int ret;
   2502
   2503	for (sz = 512; sz <= t->max_tfr; sz += 512) {
   2504		ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
   2505						 use_areq);
   2506		if (ret)
   2507			return ret;
   2508	}
   2509	return 0;
   2510}
   2511
   2512/*
   2513 * Commands during read - no Set Block Count (CMD23).
   2514 */
   2515static int mmc_test_cmds_during_read(struct mmc_test_card *test)
   2516{
   2517	return mmc_test_cmds_during_tfr(test, 0, 0, 0);
   2518}
   2519
   2520/*
   2521 * Commands during write - no Set Block Count (CMD23).
   2522 */
   2523static int mmc_test_cmds_during_write(struct mmc_test_card *test)
   2524{
   2525	return mmc_test_cmds_during_tfr(test, 0, 1, 0);
   2526}
   2527
   2528/*
   2529 * Commands during read - use Set Block Count (CMD23).
   2530 */
   2531static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
   2532{
   2533	return mmc_test_cmds_during_tfr(test, 1, 0, 0);
   2534}
   2535
   2536/*
   2537 * Commands during write - use Set Block Count (CMD23).
   2538 */
   2539static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
   2540{
   2541	return mmc_test_cmds_during_tfr(test, 1, 1, 0);
   2542}
   2543
   2544/*
   2545 * Commands during non-blocking read - use Set Block Count (CMD23).
   2546 */
   2547static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
   2548{
   2549	return mmc_test_cmds_during_tfr(test, 1, 0, 1);
   2550}
   2551
   2552/*
   2553 * Commands during non-blocking write - use Set Block Count (CMD23).
   2554 */
   2555static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
   2556{
   2557	return mmc_test_cmds_during_tfr(test, 1, 1, 1);
   2558}
   2559
   2560static const struct mmc_test_case mmc_test_cases[] = {
   2561	{
   2562		.name = "Basic write (no data verification)",
   2563		.run = mmc_test_basic_write,
   2564	},
   2565
   2566	{
   2567		.name = "Basic read (no data verification)",
   2568		.run = mmc_test_basic_read,
   2569	},
   2570
   2571	{
   2572		.name = "Basic write (with data verification)",
   2573		.prepare = mmc_test_prepare_write,
   2574		.run = mmc_test_verify_write,
   2575		.cleanup = mmc_test_cleanup,
   2576	},
   2577
   2578	{
   2579		.name = "Basic read (with data verification)",
   2580		.prepare = mmc_test_prepare_read,
   2581		.run = mmc_test_verify_read,
   2582		.cleanup = mmc_test_cleanup,
   2583	},
   2584
   2585	{
   2586		.name = "Multi-block write",
   2587		.prepare = mmc_test_prepare_write,
   2588		.run = mmc_test_multi_write,
   2589		.cleanup = mmc_test_cleanup,
   2590	},
   2591
   2592	{
   2593		.name = "Multi-block read",
   2594		.prepare = mmc_test_prepare_read,
   2595		.run = mmc_test_multi_read,
   2596		.cleanup = mmc_test_cleanup,
   2597	},
   2598
   2599	{
   2600		.name = "Power of two block writes",
   2601		.prepare = mmc_test_prepare_write,
   2602		.run = mmc_test_pow2_write,
   2603		.cleanup = mmc_test_cleanup,
   2604	},
   2605
   2606	{
   2607		.name = "Power of two block reads",
   2608		.prepare = mmc_test_prepare_read,
   2609		.run = mmc_test_pow2_read,
   2610		.cleanup = mmc_test_cleanup,
   2611	},
   2612
   2613	{
   2614		.name = "Weird sized block writes",
   2615		.prepare = mmc_test_prepare_write,
   2616		.run = mmc_test_weird_write,
   2617		.cleanup = mmc_test_cleanup,
   2618	},
   2619
   2620	{
   2621		.name = "Weird sized block reads",
   2622		.prepare = mmc_test_prepare_read,
   2623		.run = mmc_test_weird_read,
   2624		.cleanup = mmc_test_cleanup,
   2625	},
   2626
   2627	{
   2628		.name = "Badly aligned write",
   2629		.prepare = mmc_test_prepare_write,
   2630		.run = mmc_test_align_write,
   2631		.cleanup = mmc_test_cleanup,
   2632	},
   2633
   2634	{
   2635		.name = "Badly aligned read",
   2636		.prepare = mmc_test_prepare_read,
   2637		.run = mmc_test_align_read,
   2638		.cleanup = mmc_test_cleanup,
   2639	},
   2640
   2641	{
   2642		.name = "Badly aligned multi-block write",
   2643		.prepare = mmc_test_prepare_write,
   2644		.run = mmc_test_align_multi_write,
   2645		.cleanup = mmc_test_cleanup,
   2646	},
   2647
   2648	{
   2649		.name = "Badly aligned multi-block read",
   2650		.prepare = mmc_test_prepare_read,
   2651		.run = mmc_test_align_multi_read,
   2652		.cleanup = mmc_test_cleanup,
   2653	},
   2654
   2655	{
   2656		.name = "Proper xfer_size at write (start failure)",
   2657		.run = mmc_test_xfersize_write,
   2658	},
   2659
   2660	{
   2661		.name = "Proper xfer_size at read (start failure)",
   2662		.run = mmc_test_xfersize_read,
   2663	},
   2664
   2665	{
   2666		.name = "Proper xfer_size at write (midway failure)",
   2667		.run = mmc_test_multi_xfersize_write,
   2668	},
   2669
   2670	{
   2671		.name = "Proper xfer_size at read (midway failure)",
   2672		.run = mmc_test_multi_xfersize_read,
   2673	},
   2674
   2675#ifdef CONFIG_HIGHMEM
   2676
   2677	{
   2678		.name = "Highmem write",
   2679		.prepare = mmc_test_prepare_write,
   2680		.run = mmc_test_write_high,
   2681		.cleanup = mmc_test_cleanup,
   2682	},
   2683
   2684	{
   2685		.name = "Highmem read",
   2686		.prepare = mmc_test_prepare_read,
   2687		.run = mmc_test_read_high,
   2688		.cleanup = mmc_test_cleanup,
   2689	},
   2690
   2691	{
   2692		.name = "Multi-block highmem write",
   2693		.prepare = mmc_test_prepare_write,
   2694		.run = mmc_test_multi_write_high,
   2695		.cleanup = mmc_test_cleanup,
   2696	},
   2697
   2698	{
   2699		.name = "Multi-block highmem read",
   2700		.prepare = mmc_test_prepare_read,
   2701		.run = mmc_test_multi_read_high,
   2702		.cleanup = mmc_test_cleanup,
   2703	},
   2704
   2705#else
   2706
   2707	{
   2708		.name = "Highmem write",
   2709		.run = mmc_test_no_highmem,
   2710	},
   2711
   2712	{
   2713		.name = "Highmem read",
   2714		.run = mmc_test_no_highmem,
   2715	},
   2716
   2717	{
   2718		.name = "Multi-block highmem write",
   2719		.run = mmc_test_no_highmem,
   2720	},
   2721
   2722	{
   2723		.name = "Multi-block highmem read",
   2724		.run = mmc_test_no_highmem,
   2725	},
   2726
   2727#endif /* CONFIG_HIGHMEM */
   2728
   2729	{
   2730		.name = "Best-case read performance",
   2731		.prepare = mmc_test_area_prepare_fill,
   2732		.run = mmc_test_best_read_performance,
   2733		.cleanup = mmc_test_area_cleanup,
   2734	},
   2735
   2736	{
   2737		.name = "Best-case write performance",
   2738		.prepare = mmc_test_area_prepare_erase,
   2739		.run = mmc_test_best_write_performance,
   2740		.cleanup = mmc_test_area_cleanup,
   2741	},
   2742
   2743	{
   2744		.name = "Best-case read performance into scattered pages",
   2745		.prepare = mmc_test_area_prepare_fill,
   2746		.run = mmc_test_best_read_perf_max_scatter,
   2747		.cleanup = mmc_test_area_cleanup,
   2748	},
   2749
   2750	{
   2751		.name = "Best-case write performance from scattered pages",
   2752		.prepare = mmc_test_area_prepare_erase,
   2753		.run = mmc_test_best_write_perf_max_scatter,
   2754		.cleanup = mmc_test_area_cleanup,
   2755	},
   2756
   2757	{
   2758		.name = "Single read performance by transfer size",
   2759		.prepare = mmc_test_area_prepare_fill,
   2760		.run = mmc_test_profile_read_perf,
   2761		.cleanup = mmc_test_area_cleanup,
   2762	},
   2763
   2764	{
   2765		.name = "Single write performance by transfer size",
   2766		.prepare = mmc_test_area_prepare,
   2767		.run = mmc_test_profile_write_perf,
   2768		.cleanup = mmc_test_area_cleanup,
   2769	},
   2770
   2771	{
   2772		.name = "Single trim performance by transfer size",
   2773		.prepare = mmc_test_area_prepare_fill,
   2774		.run = mmc_test_profile_trim_perf,
   2775		.cleanup = mmc_test_area_cleanup,
   2776	},
   2777
   2778	{
   2779		.name = "Consecutive read performance by transfer size",
   2780		.prepare = mmc_test_area_prepare_fill,
   2781		.run = mmc_test_profile_seq_read_perf,
   2782		.cleanup = mmc_test_area_cleanup,
   2783	},
   2784
   2785	{
   2786		.name = "Consecutive write performance by transfer size",
   2787		.prepare = mmc_test_area_prepare,
   2788		.run = mmc_test_profile_seq_write_perf,
   2789		.cleanup = mmc_test_area_cleanup,
   2790	},
   2791
   2792	{
   2793		.name = "Consecutive trim performance by transfer size",
   2794		.prepare = mmc_test_area_prepare,
   2795		.run = mmc_test_profile_seq_trim_perf,
   2796		.cleanup = mmc_test_area_cleanup,
   2797	},
   2798
   2799	{
   2800		.name = "Random read performance by transfer size",
   2801		.prepare = mmc_test_area_prepare,
   2802		.run = mmc_test_random_read_perf,
   2803		.cleanup = mmc_test_area_cleanup,
   2804	},
   2805
   2806	{
   2807		.name = "Random write performance by transfer size",
   2808		.prepare = mmc_test_area_prepare,
   2809		.run = mmc_test_random_write_perf,
   2810		.cleanup = mmc_test_area_cleanup,
   2811	},
   2812
   2813	{
   2814		.name = "Large sequential read into scattered pages",
   2815		.prepare = mmc_test_area_prepare,
   2816		.run = mmc_test_large_seq_read_perf,
   2817		.cleanup = mmc_test_area_cleanup,
   2818	},
   2819
   2820	{
   2821		.name = "Large sequential write from scattered pages",
   2822		.prepare = mmc_test_area_prepare,
   2823		.run = mmc_test_large_seq_write_perf,
   2824		.cleanup = mmc_test_area_cleanup,
   2825	},
   2826
   2827	{
   2828		.name = "Write performance with blocking req 4k to 4MB",
   2829		.prepare = mmc_test_area_prepare,
   2830		.run = mmc_test_profile_mult_write_blocking_perf,
   2831		.cleanup = mmc_test_area_cleanup,
   2832	},
   2833
   2834	{
   2835		.name = "Write performance with non-blocking req 4k to 4MB",
   2836		.prepare = mmc_test_area_prepare,
   2837		.run = mmc_test_profile_mult_write_nonblock_perf,
   2838		.cleanup = mmc_test_area_cleanup,
   2839	},
   2840
   2841	{
   2842		.name = "Read performance with blocking req 4k to 4MB",
   2843		.prepare = mmc_test_area_prepare,
   2844		.run = mmc_test_profile_mult_read_blocking_perf,
   2845		.cleanup = mmc_test_area_cleanup,
   2846	},
   2847
   2848	{
   2849		.name = "Read performance with non-blocking req 4k to 4MB",
   2850		.prepare = mmc_test_area_prepare,
   2851		.run = mmc_test_profile_mult_read_nonblock_perf,
   2852		.cleanup = mmc_test_area_cleanup,
   2853	},
   2854
   2855	{
   2856		.name = "Write performance blocking req 1 to 512 sg elems",
   2857		.prepare = mmc_test_area_prepare,
   2858		.run = mmc_test_profile_sglen_wr_blocking_perf,
   2859		.cleanup = mmc_test_area_cleanup,
   2860	},
   2861
   2862	{
   2863		.name = "Write performance non-blocking req 1 to 512 sg elems",
   2864		.prepare = mmc_test_area_prepare,
   2865		.run = mmc_test_profile_sglen_wr_nonblock_perf,
   2866		.cleanup = mmc_test_area_cleanup,
   2867	},
   2868
   2869	{
   2870		.name = "Read performance blocking req 1 to 512 sg elems",
   2871		.prepare = mmc_test_area_prepare,
   2872		.run = mmc_test_profile_sglen_r_blocking_perf,
   2873		.cleanup = mmc_test_area_cleanup,
   2874	},
   2875
   2876	{
   2877		.name = "Read performance non-blocking req 1 to 512 sg elems",
   2878		.prepare = mmc_test_area_prepare,
   2879		.run = mmc_test_profile_sglen_r_nonblock_perf,
   2880		.cleanup = mmc_test_area_cleanup,
   2881	},
   2882
   2883	{
   2884		.name = "Reset test",
   2885		.run = mmc_test_reset,
   2886	},
   2887
   2888	{
   2889		.name = "Commands during read - no Set Block Count (CMD23)",
   2890		.prepare = mmc_test_area_prepare,
   2891		.run = mmc_test_cmds_during_read,
   2892		.cleanup = mmc_test_area_cleanup,
   2893	},
   2894
   2895	{
   2896		.name = "Commands during write - no Set Block Count (CMD23)",
   2897		.prepare = mmc_test_area_prepare,
   2898		.run = mmc_test_cmds_during_write,
   2899		.cleanup = mmc_test_area_cleanup,
   2900	},
   2901
   2902	{
   2903		.name = "Commands during read - use Set Block Count (CMD23)",
   2904		.prepare = mmc_test_area_prepare,
   2905		.run = mmc_test_cmds_during_read_cmd23,
   2906		.cleanup = mmc_test_area_cleanup,
   2907	},
   2908
   2909	{
   2910		.name = "Commands during write - use Set Block Count (CMD23)",
   2911		.prepare = mmc_test_area_prepare,
   2912		.run = mmc_test_cmds_during_write_cmd23,
   2913		.cleanup = mmc_test_area_cleanup,
   2914	},
   2915
   2916	{
   2917		.name = "Commands during non-blocking read - use Set Block Count (CMD23)",
   2918		.prepare = mmc_test_area_prepare,
   2919		.run = mmc_test_cmds_during_read_cmd23_nonblock,
   2920		.cleanup = mmc_test_area_cleanup,
   2921	},
   2922
   2923	{
   2924		.name = "Commands during non-blocking write - use Set Block Count (CMD23)",
   2925		.prepare = mmc_test_area_prepare,
   2926		.run = mmc_test_cmds_during_write_cmd23_nonblock,
   2927		.cleanup = mmc_test_area_cleanup,
   2928	},
   2929};
   2930
   2931static DEFINE_MUTEX(mmc_test_lock);
   2932
   2933static LIST_HEAD(mmc_test_result);
   2934
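       /*
        * Run the 1-based test case 'testcase', or every test case when it is
        * zero.  Results are appended to mmc_test_result for later retrieval
        * through the debugfs 'test' file.
        */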
   2935static void mmc_test_run(struct mmc_test_card *test, int testcase)
   2936{
   2937	int i, ret;
   2938
   2939	pr_info("%s: Starting tests of card %s...\n",
   2940		mmc_hostname(test->card->host), mmc_card_id(test->card));
   2941
   2942	mmc_claim_host(test->card->host);
   2943
   2944	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
   2945		struct mmc_test_general_result *gr;
   2946
   2947		if (testcase && ((i + 1) != testcase))
   2948			continue;
   2949
   2950		pr_info("%s: Test case %d. %s...\n",
   2951			mmc_hostname(test->card->host), i + 1,
   2952			mmc_test_cases[i].name);
   2953
   2954		if (mmc_test_cases[i].prepare) {
   2955			ret = mmc_test_cases[i].prepare(test);
   2956			if (ret) {
   2957				pr_info("%s: Result: Prepare stage failed! (%d)\n",
   2958					mmc_hostname(test->card->host),
   2959					ret);
   2960				continue;
   2961			}
   2962		}
   2963
   2964		gr = kzalloc(sizeof(*gr), GFP_KERNEL);
   2965		if (gr) {
   2966			INIT_LIST_HEAD(&gr->tr_lst);
   2967
   2968			/* Record what we already know */
   2969			gr->card = test->card;
   2970			gr->testcase = i;
   2971
   2972			/* Append container to the global result list */
   2973			list_add_tail(&gr->link, &mmc_test_result);
   2974
   2975			/*
   2976			 * Save the pointer to the created container in our private
   2977			 * structure.
   2978			 */
   2979			test->gr = gr;
   2980		}
   2981
   2982		ret = mmc_test_cases[i].run(test);
   2983		switch (ret) {
   2984		case RESULT_OK:
   2985			pr_info("%s: Result: OK\n",
   2986				mmc_hostname(test->card->host));
   2987			break;
   2988		case RESULT_FAIL:
   2989			pr_info("%s: Result: FAILED\n",
   2990				mmc_hostname(test->card->host));
   2991			break;
   2992		case RESULT_UNSUP_HOST:
   2993			pr_info("%s: Result: UNSUPPORTED (by host)\n",
   2994				mmc_hostname(test->card->host));
   2995			break;
   2996		case RESULT_UNSUP_CARD:
   2997			pr_info("%s: Result: UNSUPPORTED (by card)\n",
   2998				mmc_hostname(test->card->host));
   2999			break;
   3000		default:
   3001			pr_info("%s: Result: ERROR (%d)\n",
   3002				mmc_hostname(test->card->host), ret);
   3003		}
   3004
   3005		/* Save the result */
   3006		if (gr)
   3007			gr->result = ret;
   3008
   3009		if (mmc_test_cases[i].cleanup) {
   3010			ret = mmc_test_cases[i].cleanup(test);
   3011			if (ret) {
   3012				pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
   3013					mmc_hostname(test->card->host),
   3014					ret);
   3015			}
   3016		}
   3017	}
   3018
   3019	mmc_release_host(test->card->host);
   3020
   3021	pr_info("%s: Tests completed.\n",
   3022		mmc_hostname(test->card->host));
   3023}
   3024
   3025static void mmc_test_free_result(struct mmc_card *card)
   3026{
   3027	struct mmc_test_general_result *gr, *grs;
   3028
   3029	mutex_lock(&mmc_test_lock);
   3030
   3031	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
   3032		struct mmc_test_transfer_result *tr, *trs;
   3033
   3034		if (card && gr->card != card)
   3035			continue;
   3036
   3037		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
   3038			list_del(&tr->link);
   3039			kfree(tr);
   3040		}
   3041
   3042		list_del(&gr->link);
   3043		kfree(gr);
   3044	}
   3045
   3046	mutex_unlock(&mmc_test_lock);
   3047}
   3048
   3049static LIST_HEAD(mmc_test_file_test);
   3050
   3051static int mtf_test_show(struct seq_file *sf, void *data)
   3052{
   3053	struct mmc_card *card = (struct mmc_card *)sf->private;
   3054	struct mmc_test_general_result *gr;
   3055
   3056	mutex_lock(&mmc_test_lock);
   3057
   3058	list_for_each_entry(gr, &mmc_test_result, link) {
   3059		struct mmc_test_transfer_result *tr;
   3060
   3061		if (gr->card != card)
   3062			continue;
   3063
   3064		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
   3065
   3066		list_for_each_entry(tr, &gr->tr_lst, link) {
   3067			seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
   3068				tr->count, tr->sectors,
   3069				(u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec,
   3070				tr->rate, tr->iops / 100, tr->iops % 100);
   3071		}
   3072	}
   3073
   3074	mutex_unlock(&mmc_test_lock);
   3075
   3076	return 0;
   3077}
   3078
   3079static int mtf_test_open(struct inode *inode, struct file *file)
   3080{
   3081	return single_open(file, mtf_test_show, inode->i_private);
   3082}
   3083
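       /*
        * Writing a test number to the debugfs 'test' file runs that test (0
        * runs them all); reading the file back reports one "Test <n>: <result>"
        * line per completed test followed by any transfer results.  For
        * example (the exact debugfs path depends on the host and card):
        *
        *	echo 1 > /sys/kernel/debug/mmc0/mmc0:0001/test
        *	cat /sys/kernel/debug/mmc0/mmc0:0001/test
        */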
   3084static ssize_t mtf_test_write(struct file *file, const char __user *buf,
   3085	size_t count, loff_t *pos)
   3086{
   3087	struct seq_file *sf = (struct seq_file *)file->private_data;
   3088	struct mmc_card *card = (struct mmc_card *)sf->private;
   3089	struct mmc_test_card *test;
   3090	long testcase;
   3091	int ret;
   3092
   3093	ret = kstrtol_from_user(buf, count, 10, &testcase);
   3094	if (ret)
   3095		return ret;
   3096
   3097	test = kzalloc(sizeof(*test), GFP_KERNEL);
   3098	if (!test)
   3099		return -ENOMEM;
   3100
   3101	/*
   3102	 * Remove all test results associated with the given card, so that
   3103	 * only data from the most recent run is kept.
   3104	 */
   3105	mmc_test_free_result(card);
   3106
   3107	test->card = card;
   3108
   3109	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
   3110#ifdef CONFIG_HIGHMEM
   3111	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
   3112#endif
   3113
   3114#ifdef CONFIG_HIGHMEM
   3115	if (test->buffer && test->highmem) {
   3116#else
   3117	if (test->buffer) {
   3118#endif
   3119		mutex_lock(&mmc_test_lock);
   3120		mmc_test_run(test, testcase);
   3121		mutex_unlock(&mmc_test_lock);
   3122	}
   3123
   3124#ifdef CONFIG_HIGHMEM
   3125	if (test->highmem)	/* the allocation may have failed */
       		__free_pages(test->highmem, BUFFER_ORDER);
   3126#endif
   3127	kfree(test->buffer);
   3128	kfree(test);
   3129
   3130	return count;
   3131}
   3132
   3133static const struct file_operations mmc_test_fops_test = {
   3134	.open		= mtf_test_open,
   3135	.read		= seq_read,
   3136	.write		= mtf_test_write,
   3137	.llseek		= seq_lseek,
   3138	.release	= single_release,
   3139};
   3140
   3141static int mtf_testlist_show(struct seq_file *sf, void *data)
   3142{
   3143	int i;
   3144
   3145	mutex_lock(&mmc_test_lock);
   3146
   3147	seq_puts(sf, "0:\tRun all tests\n");
   3148	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
   3149		seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);
   3150
   3151	mutex_unlock(&mmc_test_lock);
   3152
   3153	return 0;
   3154}
   3155
   3156DEFINE_SHOW_ATTRIBUTE(mtf_testlist);
   3157
   3158static void mmc_test_free_dbgfs_file(struct mmc_card *card)
   3159{
   3160	struct mmc_test_dbgfs_file *df, *dfs;
   3161
   3162	mutex_lock(&mmc_test_lock);
   3163
   3164	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
   3165		if (card && df->card != card)
   3166			continue;
   3167		debugfs_remove(df->file);
   3168		list_del(&df->link);
   3169		kfree(df);
   3170	}
   3171
   3172	mutex_unlock(&mmc_test_lock);
   3173}
   3174
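       /*
        * Create a debugfs file under the card's debugfs directory and remember
        * it on mmc_test_file_test so that it can be removed again on card
        * removal or module unload.
        */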
   3175static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
   3176	const char *name, umode_t mode, const struct file_operations *fops)
   3177{
   3178	struct dentry *file = NULL;
   3179	struct mmc_test_dbgfs_file *df;
   3180
   3181	if (card->debugfs_root)
   3182		file = debugfs_create_file(name, mode, card->debugfs_root,
       					   card, fops);
   3183
   3184	df = kmalloc(sizeof(*df), GFP_KERNEL);
   3185	if (!df) {
   3186		debugfs_remove(file);
   3187		return -ENOMEM;
   3188	}
   3189
   3190	df->card = card;
   3191	df->file = file;
   3192
   3193	list_add(&df->link, &mmc_test_file_test);
   3194	return 0;
   3195}
   3196
   3197static int mmc_test_register_dbgfs_file(struct mmc_card *card)
   3198{
   3199	int ret;
   3200
   3201	mutex_lock(&mmc_test_lock);
   3202
   3203	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
   3204		&mmc_test_fops_test);
   3205	if (ret)
   3206		goto err;
   3207
   3208	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
   3209		&mtf_testlist_fops);
   3210	if (ret)
   3211		goto err;
   3212
   3213err:
   3214	mutex_unlock(&mmc_test_lock);
   3215
   3216	return ret;
   3217}
   3218
   3219static int mmc_test_probe(struct mmc_card *card)
   3220{
   3221	int ret;
   3222
   3223	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
   3224		return -ENODEV;
   3225
   3226	ret = mmc_test_register_dbgfs_file(card);
   3227	if (ret)
   3228		return ret;
   3229
   3230	if (card->ext_csd.cmdq_en) {
   3231		mmc_claim_host(card->host);
   3232		ret = mmc_cmdq_disable(card);
   3233		mmc_release_host(card->host);
   3234		if (ret)
   3235			return ret;
   3236	}
   3237
   3238	dev_info(&card->dev, "Card claimed for testing.\n");
   3239
   3240	return 0;
   3241}
   3242
   3243static void mmc_test_remove(struct mmc_card *card)
   3244{
   3245	if (card->reenable_cmdq) {
   3246		mmc_claim_host(card->host);
   3247		mmc_cmdq_enable(card);
   3248		mmc_release_host(card->host);
   3249	}
   3250	mmc_test_free_result(card);
   3251	mmc_test_free_dbgfs_file(card);
   3252}
   3253
   3254static struct mmc_driver mmc_driver = {
   3255	.drv		= {
   3256		.name	= "mmc_test",
   3257	},
   3258	.probe		= mmc_test_probe,
   3259	.remove		= mmc_test_remove,
   3260};
   3261
   3262static int __init mmc_test_init(void)
   3263{
   3264	return mmc_register_driver(&mmc_driver);
   3265}
   3266
   3267static void __exit mmc_test_exit(void)
   3268{
   3269	/* Clear stale results if a card is still plugged */
   3270	mmc_test_free_result(NULL);
   3271	mmc_test_free_dbgfs_file(NULL);
   3272
   3273	mmc_unregister_driver(&mmc_driver);
   3274}
   3275
   3276module_init(mmc_test_init);
   3277module_exit(mmc_test_exit);
   3278
   3279MODULE_LICENSE("GPL");
   3280MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
   3281MODULE_AUTHOR("Pierre Ossman");