cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dm-io.c (13203B)


/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t pool;
	struct bio_set bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));
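
/*
 * For illustration, assuming a 64-bit kernel (BITS_PER_LONG == 64): the
 * aligned(64) attribute above guarantees the low six bits of every
 * 'struct io' pointer are zero, so a region number in the range 0..63 can
 * be ORed directly into bio->bi_private and recovered again by masking:
 *
 *	bi_private = (unsigned long)io | region;
 *	io     = bi_private & -DM_IO_MAX_REGIONS;	// clears the low bits
 *	region = bi_private & (DM_IO_MAX_REGIONS - 1);	// keeps only them
 *
 * See store_io_and_region_in_bio() and retrieve_io_and_region_from_bio().
 */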

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
	if (ret)
		goto bad;

	ret = bioset_init(&client->bios, min_ios, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto bad;

	return client;

   bad:
	mempool_exit(&client->pool);
	kfree(client);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_exit(&client->pool);
	bioset_exit(&client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
				       unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
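/*
 * Reference counting overview: sync_io() and async_io() initialise
 * io->count to 1, do_region() takes one further reference for every bio it
 * submits, and dispatch_io() drops the initial reference once all regions
 * have been dispatched.  The io therefore completes when the final endio()
 * drives the count to zero via dec_count().
 */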
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, &io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, blk_status_t error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio)
{
	struct io *io;
	unsigned region;
	blk_status_t error;

	if (bio->bi_status && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_status;
	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	union {
		unsigned context_u;
		struct bvec_iter context_bi;
	};
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
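
/*
 * Illustrative consumption pattern (as used by do_region() below): callers
 * only ever touch a dpages object through its two callbacks, e.g.
 *
 *	dp->get_page(dp, &page, &len, &offset);	// current page, bytes
 *						// available in it, offset
 *	... add up to 'len' bytes from 'page' to a bio ...
 *	dp->next_page(dp);			// advance to the next page
 *
 * so each memory type (page list, bio, vmalloc area, kernel memory) only
 * needs to supply suitable get_page/next_page implementations.
 */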

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
					     dp->context_bi);

	*p = bvec.bv_page;
	*len = bvec.bv_len;
	*offset = bvec.bv_offset;

	/* avoid figuring it out again in bio_next_page() */
	dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}

static void bio_next_page(struct dpages *dp)
{
	unsigned int len = (unsigned int)dp->context_bi.bi_sector;

	bvec_iter_advance((struct bio_vec *)dp->context_ptr,
			  &dp->context_bi, len);
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;

	/*
	 * We just use the bvec iterator to retrieve pages, so it is ok to
	 * access the bvec table directly here.
	 */
	dp->context_ptr = bio->bi_io_vec;
	dp->context_bi = bio->bi_iter;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int op, int op_flags, unsigned region,
		      struct dm_io_region *where, struct dpages *dp,
		      struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	sector_t num_sectors;
	unsigned int special_cmd_max_sectors;

	/*
	 * Reject unsupported discard and write zeroes requests.
	 */
	if (op == REQ_OP_DISCARD)
		special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev);
	else if (op == REQ_OP_WRITE_ZEROES)
		special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
	    special_cmd_max_sectors == 0) {
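		/*
		 * Pair an extra reference with the dec_count() below so the
		 * BLK_STS_NOTSUPP error is recorded for this region without
		 * disturbing the base reference held across dispatch_io().
		 */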
		atomic_inc(&io->count);
		dec_count(io, region, BLK_STS_NOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if op holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		switch (op) {
		case REQ_OP_DISCARD:
		case REQ_OP_WRITE_ZEROES:
			num_bvecs = 0;
			break;
		default:
			num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
						(PAGE_SIZE >> SECTOR_SHIFT)));
		}

		bio = bio_alloc_bioset(where->bdev, num_bvecs, op | op_flags,
				       GFP_NOIO, &io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(bio);
	} while (remaining);
}

static void dispatch_io(int op, int op_flags, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		op_flags |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (op_flags & REQ_PREFLUSH))
			do_region(op, op_flags, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int op, int op_flags,
		   struct dpages *dp, unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(op, op_flags, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int op, int op_flags,
		    struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if (io_req->bi_op == REQ_OP_READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_op, io_req->bi_op_flags, &dp,
			       sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_op,
			io_req->bi_op_flags, &dp, io_req->notify.fn,
			io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
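
/*
 * Minimal usage sketch, assuming a hypothetical caller with a block device
 * 'bdev' and a page-aligned kernel buffer 'buf' (error handling omitted):
 *
 *	struct dm_io_client *client = dm_io_client_create();
 *	struct dm_io_region where = {
 *		.bdev	= bdev,
 *		.sector	= 0,
 *		.count	= 8,			// eight 512-byte sectors
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_op		= REQ_OP_READ,
 *		.bi_op_flags	= 0,
 *		.mem.type	= DM_IO_KMEM,
 *		.mem.ptr.addr	= buf,
 *		.notify.fn	= NULL,		// NULL selects the sync_io() path
 *		.client		= client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &where, &error_bits);
 *
 *	dm_io_client_destroy(client);
 */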

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}