cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dm-zone.c (16798B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */
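
/*
 * Zoned block device support for device mapper: remapping of zone reports to
 * the mapped device's sector range and, for targets that cannot pass zone
 * append operations through, emulation of zone append using regular writes
 * (see dm_zone_map_bio()).
 */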

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

#include "dm-core.h"

#define DM_MSG_PREFIX "zone"

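/*
 * Sentinel stored in md->zwp_offset[] when the write pointer offset of a
 * zone is unknown, e.g. after a failed write; dm_zone_map_bio_begin()
 * recovers from it by re-reading the zone from the device.
 */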
#define DM_ZONE_INVALID_WP_OFST		UINT_MAX

/*
 * For internal zone reports bypassing the top BIO submission path.
 */
static int dm_blk_do_report_zones(struct mapped_device *md, struct dm_table *t,
				  sector_t sector, unsigned int nr_zones,
				  report_zones_cb cb, void *data)
{
	struct gendisk *disk = md->disk;
	int ret;
	struct dm_report_zones_args args = {
		.next_sector = sector,
		.orig_data = data,
		.orig_cb = cb,
	};

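	/*
	 * Iterate over the targets of the table: dm_report_zones_cb()
	 * advances args.next_sector past each reported zone, moving the
	 * loop to the next target once the current one is fully reported.
	 */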
	do {
		struct dm_target *tgt;

		tgt = dm_table_find_target(t, args.next_sector);
		if (WARN_ON_ONCE(!tgt->type->report_zones))
			return -EIO;

		args.tgt = tgt;
		ret = tgt->type->report_zones(tgt, &args,
					      nr_zones - args.zone_idx);
		if (ret < 0)
			return ret;
	} while (args.zone_idx < nr_zones &&
		 args.next_sector < get_capacity(disk));

	return args.zone_idx;
}

/*
 * User-facing report_zones operation for a DM block device. This calls the
 * report_zones operation of each target in the device table. Targets
 * generally implement this operation using dm_report_zones().
 */
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct mapped_device *md = disk->private_data;
	struct dm_table *map;
	int srcu_idx, ret;

	if (dm_suspended_md(md))
		return -EAGAIN;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return -EIO;

	ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, data);

	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx,
			      void *data)
{
	struct dm_report_zones_args *args = data;
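	/*
	 * args->start is the target mapping start sector on the underlying
	 * device (set by dm_report_zones()) and tgt->begin is the target's
	 * start sector in the mapped device, so sector_diff relocates the
	 * reported zone into the mapped device's sector space.
	 */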
	sector_t sector_diff = args->tgt->begin - args->start;

	/*
	 * Ignore zones beyond the target range.
	 */
	if (zone->start >= args->start + args->tgt->len)
		return 0;

	/*
	 * Remap the start sector and write pointer position of the zone
	 * to match its position in the target range.
	 */
	zone->start += sector_diff;
	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
		if (zone->cond == BLK_ZONE_COND_FULL)
			zone->wp = zone->start + zone->len;
		else if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->wp = zone->start;
		else
			zone->wp += sector_diff;
	}

	args->next_sector = zone->start + zone->len;
	return args->orig_cb(zone, args->zone_idx++, args->orig_data);
}

/*
 * Helper for zoned target drivers to implement the struct target_type
 * report_zones operation.
 */
int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
		    struct dm_report_zones_args *args, unsigned int nr_zones)
{
	/*
	 * Set the target mapping start sector first so that
	 * dm_report_zones_cb() can correctly remap zone information.
	 */
	args->start = start;

	return blkdev_report_zones(bdev, sector, nr_zones,
				   dm_report_zones_cb, args);
}
EXPORT_SYMBOL_GPL(dm_report_zones);

bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
{
	struct request_queue *q = md->queue;

	if (!blk_queue_is_zoned(q))
		return false;

	switch (bio_op(bio)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE:
		return !op_is_flush(bio->bi_opf) && bio_sectors(bio);
	default:
		return false;
	}
}

void dm_cleanup_zoned_dev(struct mapped_device *md)
{
	struct request_queue *q = md->queue;

	if (q) {
		kfree(q->conv_zones_bitmap);
		q->conv_zones_bitmap = NULL;
		kfree(q->seq_zones_wlock);
		q->seq_zones_wlock = NULL;
	}

	kvfree(md->zwp_offset);
	md->zwp_offset = NULL;
	md->nr_zones = 0;
}

static unsigned int dm_get_zone_wp_offset(struct blk_zone *zone)
{
	switch (zone->cond) {
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
	case BLK_ZONE_COND_CLOSED:
		return zone->wp - zone->start;
	case BLK_ZONE_COND_FULL:
		return zone->len;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_NOT_WP:
	case BLK_ZONE_COND_OFFLINE:
	case BLK_ZONE_COND_READONLY:
	default:
		/*
		 * Conventional, offline and read-only zones do not have a
		 * valid write pointer. Use 0, as for an empty zone.
		 */
		return 0;
	}
}

static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx,
				 void *data)
{
	struct mapped_device *md = data;
	struct request_queue *q = md->queue;

	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!q->conv_zones_bitmap) {
			q->conv_zones_bitmap =
				kcalloc(BITS_TO_LONGS(q->nr_zones),
					sizeof(unsigned long), GFP_NOIO);
			if (!q->conv_zones_bitmap)
				return -ENOMEM;
		}
		set_bit(idx, q->conv_zones_bitmap);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		if (!q->seq_zones_wlock) {
			q->seq_zones_wlock =
				kcalloc(BITS_TO_LONGS(q->nr_zones),
					sizeof(unsigned long), GFP_NOIO);
			if (!q->seq_zones_wlock)
				return -ENOMEM;
		}
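		/*
		 * kvcalloc() cannot be passed GFP_NOIO directly; callers
		 * enter a memalloc_noio section instead, so this GFP_KERNEL
		 * allocation behaves as if GFP_NOIO was specified.
		 */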
		if (!md->zwp_offset) {
			md->zwp_offset =
				kvcalloc(q->nr_zones, sizeof(unsigned int),
					 GFP_KERNEL);
			if (!md->zwp_offset)
				return -ENOMEM;
		}
		md->zwp_offset[idx] = dm_get_zone_wp_offset(zone);
		break;
	default:
		DMERR("Invalid zone type 0x%x at sector %llu",
		      (int)zone->type, zone->start);
		return -ENODEV;
	}

	return 0;
}

/*
 * Revalidate the zones of a mapped device to initialize the resources
 * necessary for zone append emulation. Note that we cannot simply use the
 * block layer blk_revalidate_disk_zones() function here as the mapped device
 * is suspended (this is called from __bind() context).
 */
static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q = md->queue;
	unsigned int noio_flag;
	int ret;

	/*
	 * Check if something changed. If yes, clean up the current resources
	 * and reallocate everything.
	 */
	if (!q->nr_zones || q->nr_zones != md->nr_zones)
		dm_cleanup_zoned_dev(md);
	if (md->nr_zones)
		return 0;

	/*
	 * Scan all zones to initialize everything. Ensure that all vmalloc
	 * operations in this context are done as if GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = dm_blk_do_report_zones(md, t, 0, q->nr_zones,
				     dm_zone_revalidate_cb, md);
	memalloc_noio_restore(noio_flag);
	if (ret < 0)
		goto err;
	if (ret != q->nr_zones) {
		ret = -EIO;
		goto err;
	}

	md->nr_zones = q->nr_zones;

	return 0;

err:
	DMERR("Revalidate zones failed %d", ret);
	dm_cleanup_zoned_dev(md);
	return ret;
}

static int device_not_zone_append_capable(struct dm_target *ti,
					  struct dm_dev *dev, sector_t start,
					  sector_t len, void *data)
{
	return !blk_queue_is_zoned(bdev_get_queue(dev->bdev));
}

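/*
 * A table supports native zone append only if none of its targets requires
 * zone append emulation and all of the underlying devices are zoned block
 * devices.
 */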
static bool dm_table_supports_zone_append(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned int i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (ti->emulate_zone_append)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_zone_append_capable, NULL))
			return false;
	}

	return true;
}

int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
{
	struct mapped_device *md = t->md;

	/*
	 * For a zoned target, the number of zones must be updated so that the
	 * correct value is exposed in sysfs queue/nr_zones.
	 */
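	/*
	 * Zone handling here assumes a bio-based device; a request-based
	 * (mq) queue is never expected, hence the warning below.
	 */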
	WARN_ON_ONCE(queue_is_mq(q));
	q->nr_zones = blkdev_nr_zones(md->disk);

	/* Check if zone append is natively supported */
	if (dm_table_supports_zone_append(t)) {
		clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
		dm_cleanup_zoned_dev(md);
		return 0;
	}

	/*
	 * Mark the mapped device as needing zone append emulation and
	 * initialize the emulation resources once the capacity is set.
	 */
	set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
	if (!get_capacity(md->disk))
		return 0;

	return dm_revalidate_zones(md, t);
}

static int dm_update_zone_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
				       void *data)
{
	unsigned int *wp_offset = data;

	*wp_offset = dm_get_zone_wp_offset(zone);

	return 0;
}

static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno,
				    unsigned int *wp_ofst)
{
	sector_t sector = zno * blk_queue_zone_sectors(md->queue);
	unsigned int noio_flag;
	struct dm_table *t;
	int srcu_idx, ret;

	t = dm_get_live_table(md, &srcu_idx);
	if (!t)
		return -EIO;

	/*
	 * Ensure that all memory allocations in this context are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = dm_blk_do_report_zones(md, t, sector, 1,
				     dm_update_zone_wp_offset_cb, wp_ofst);
	memalloc_noio_restore(noio_flag);

	dm_put_live_table(md, srcu_idx);

	if (ret != 1)
		return -EIO;

	return 0;
}

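/*
 * Snapshot of the original BIO's operation and size, taken before the target
 * ->map() is called: the clone may be modified or even completed by the time
 * dm_zone_map_bio_end() needs these values.
 */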
struct orig_bio_details {
	unsigned int op;
	unsigned int nr_sectors;
};

/*
 * First phase of BIO mapping for targets with zone append emulation:
 * check all BIOs that change a zone write pointer and turn zone append
 * operations into regular write operations.
 */
static bool dm_zone_map_bio_begin(struct mapped_device *md,
				  unsigned int zno, struct bio *clone)
{
	sector_t zsectors = blk_queue_zone_sectors(md->queue);
	unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);

	/*
	 * If the target zone is in an error state, recover by inspecting the
	 * zone to get its current write pointer position. Note that since the
	 * target zone is already locked, a BIO issuing context should never
	 * see a zone write pointer offset update in progress.
	 */
	if (zwp_offset == DM_ZONE_INVALID_WP_OFST) {
		if (dm_update_zone_wp_offset(md, zno, &zwp_offset))
			return false;
		WRITE_ONCE(md->zwp_offset[zno], zwp_offset);
	}

	switch (bio_op(clone)) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_FINISH:
		return true;
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE:
		/* Writes must be aligned to the zone write pointer */
		if ((clone->bi_iter.bi_sector & (zsectors - 1)) != zwp_offset)
			return false;
		break;
	case REQ_OP_ZONE_APPEND:
		/*
		 * Change zone append operations into non-mergeable regular
		 * writes directed at the current write pointer position of
		 * the target zone.
		 */
		clone->bi_opf = REQ_OP_WRITE | REQ_NOMERGE |
			(clone->bi_opf & (~REQ_OP_MASK));
		clone->bi_iter.bi_sector += zwp_offset;
		break;
	default:
		DMWARN_LIMIT("Invalid BIO operation");
		return false;
	}

	/* Cannot write to a full zone */
	if (zwp_offset >= zsectors)
		return false;

	return true;
}

/*
 * Second phase of BIO mapping for targets with zone append emulation:
 * update the zone write pointer offset array to account for the additional
 * data written to a zone. Note that at this point, the remapped clone BIO
 * may already have completed, so we do not touch it.
 */
static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int zno,
					struct orig_bio_details *orig_bio_details,
					unsigned int nr_sectors)
{
	unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);

	/* The clone BIO may already have been completed and failed */
	if (zwp_offset == DM_ZONE_INVALID_WP_OFST)
		return BLK_STS_IOERR;

	/* Update the zone wp offset */
	switch (orig_bio_details->op) {
	case REQ_OP_ZONE_RESET:
		WRITE_ONCE(md->zwp_offset[zno], 0);
		return BLK_STS_OK;
	case REQ_OP_ZONE_FINISH:
		WRITE_ONCE(md->zwp_offset[zno],
			   blk_queue_zone_sectors(md->queue));
		return BLK_STS_OK;
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE:
		WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);
		return BLK_STS_OK;
	case REQ_OP_ZONE_APPEND:
		/*
		 * Check that the target did not truncate the write operation
		 * emulating a zone append.
		 */
		if (nr_sectors != orig_bio_details->nr_sectors) {
			DMWARN_LIMIT("Truncated write for zone append");
			return BLK_STS_IOERR;
		}
		WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);
		return BLK_STS_OK;
	default:
		DMWARN_LIMIT("Invalid BIO operation");
		return BLK_STS_IOERR;
	}
}

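/*
 * Per-zone write locking: seq_zones_wlock is a bitmap with one bit per
 * sequential zone. dm_zone_lock() sleeps until the zone's bit can be
 * acquired, serializing all write-pointer-modifying BIOs to that zone, and
 * flags the clone so that dm_zone_unlock() releases the bit exactly once.
 */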
static inline void dm_zone_lock(struct request_queue *q,
				unsigned int zno, struct bio *clone)
{
	if (WARN_ON_ONCE(bio_flagged(clone, BIO_ZONE_WRITE_LOCKED)))
		return;

	wait_on_bit_lock_io(q->seq_zones_wlock, zno, TASK_UNINTERRUPTIBLE);
	bio_set_flag(clone, BIO_ZONE_WRITE_LOCKED);
}

static inline void dm_zone_unlock(struct request_queue *q,
				  unsigned int zno, struct bio *clone)
{
	if (!bio_flagged(clone, BIO_ZONE_WRITE_LOCKED))
		return;

	WARN_ON_ONCE(!test_bit(zno, q->seq_zones_wlock));
	clear_bit_unlock(zno, q->seq_zones_wlock);
	smp_mb__after_atomic();
	wake_up_bit(q->seq_zones_wlock, zno);

	bio_clear_flag(clone, BIO_ZONE_WRITE_LOCKED);
}

static bool dm_need_zone_wp_tracking(struct bio *bio)
{
	/*
	 * Special processing is not needed for operations that do not need the
	 * zone write lock, that is, all operations that target conventional
	 * zones and all operations that do not directly modify a sequential
	 * zone write pointer.
	 */
	if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
		return false;
	switch (bio_op(bio)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE:
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_FINISH:
	case REQ_OP_ZONE_APPEND:
		return bio_zone_is_seq(bio);
	default:
		return false;
	}
}

/*
 * Special IO mapping for targets needing zone append emulation.
 */
int dm_zone_map_bio(struct dm_target_io *tio)
{
	struct dm_io *io = tio->io;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = io->md;
	struct request_queue *q = md->queue;
	struct bio *clone = &tio->clone;
	struct orig_bio_details orig_bio_details;
	unsigned int zno;
	blk_status_t sts;
	int r;

	/*
	 * IOs that do not change a zone write pointer do not need
	 * any additional special processing.
	 */
	if (!dm_need_zone_wp_tracking(clone))
		return ti->type->map(ti, clone);

	/* Lock the target zone */
	zno = bio_zone_no(clone);
	dm_zone_lock(q, zno, clone);

	orig_bio_details.nr_sectors = bio_sectors(clone);
	orig_bio_details.op = bio_op(clone);

	/*
	 * Check that the bio and the target zone write pointer offset are
	 * both valid, and if the bio is a zone append, remap it to a write.
	 */
	if (!dm_zone_map_bio_begin(md, zno, clone)) {
		dm_zone_unlock(q, zno, clone);
		return DM_MAPIO_KILL;
	}

	/* Let the target do its work */
	r = ti->type->map(ti, clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/*
		 * The target submitted the clone BIO. The target zone will
		 * be unlocked on completion of the clone.
		 */
		sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
					  *tio->len_ptr);
		break;
	case DM_MAPIO_REMAPPED:
		/*
		 * The target only remapped the clone BIO. In case of error,
		 * unlock the target zone here as the clone will not be
		 * submitted.
		 */
		sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
					  *tio->len_ptr);
		if (sts != BLK_STS_OK)
			dm_zone_unlock(q, zno, clone);
		break;
	case DM_MAPIO_REQUEUE:
	case DM_MAPIO_KILL:
	default:
		dm_zone_unlock(q, zno, clone);
		sts = BLK_STS_IOERR;
		break;
	}

	if (sts != BLK_STS_OK)
		return DM_MAPIO_KILL;

	return r;
}

/*
 * IO completion callback called from clone_endio().
 */
void dm_zone_endio(struct dm_io *io, struct bio *clone)
{
	struct mapped_device *md = io->md;
	struct request_queue *q = md->queue;
	struct bio *orig_bio = io->orig_bio;
	unsigned int zwp_offset;
	unsigned int zno;

	/*
	 * For targets that do not emulate zone append, we only need to
	 * handle native zone-append bios.
	 */
	if (!dm_emulate_zone_append(md)) {
		/*
		 * Get the offset within the zone of the written sector
		 * and add that to the original bio sector position.
		 */
		if (clone->bi_status == BLK_STS_OK &&
		    bio_op(clone) == REQ_OP_ZONE_APPEND) {
			sector_t mask = (sector_t)blk_queue_zone_sectors(q) - 1;

			orig_bio->bi_iter.bi_sector +=
				clone->bi_iter.bi_sector & mask;
		}

		return;
	}

	/*
	 * For targets that do emulate zone append, if the clone BIO does not
	 * own the target zone write lock, we have nothing to do.
	 */
	if (!bio_flagged(clone, BIO_ZONE_WRITE_LOCKED))
		return;

	zno = bio_zone_no(orig_bio);

	if (clone->bi_status != BLK_STS_OK) {
		/*
		 * BIOs that modify a zone write pointer may leave the zone
		 * in an unknown state in case of failure (e.g. the write
		 * pointer was only partially advanced). In this case, set
		 * the target zone write pointer as invalid unless it is
		 * already being updated.
		 */
		WRITE_ONCE(md->zwp_offset[zno], DM_ZONE_INVALID_WP_OFST);
	} else if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
		/*
		 * Get the written sector for zone append operations that were
		 * emulated using regular write operations.
		 */
		zwp_offset = READ_ONCE(md->zwp_offset[zno]);
		if (WARN_ON_ONCE(zwp_offset < bio_sectors(orig_bio)))
			WRITE_ONCE(md->zwp_offset[zno],
				   DM_ZONE_INVALID_WP_OFST);
		else
			orig_bio->bi_iter.bi_sector +=
				zwp_offset - bio_sectors(orig_bio);
	}

	dm_zone_unlock(q, zno, clone);
}