cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

md-faulty.c (8800B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * faulty.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2004 Neil Brown
 *
 * faulty-device-simulator personality for md
 */


/*
 * The "faulty" personality causes some requests to fail.
 *
 * Possible failure modes are:
 *   reads fail "randomly" but succeed on retry
 *   writes fail "randomly" but succeed on retry
 *   reads for some address fail and then persist until a write
 *   reads for some address fail and then persist irrespective of writes
 *   writes for some address fail and persist
 *   all writes fail
 *
 * Several modes can be active at the same time, but only
 * one can be set at array creation.  Others can be added later.
 * A mode can be one-shot or recurrent, with the recurrence being
 * once in every N requests.
 * The bottom 5 bits of the "layout" indicate the mode.  The
 * remainder indicate a period, or 0 for one-shot.
 *
 * There is an implementation limit on the number of concurrently
 * persisting-faulty blocks. When a new fault is requested that would
 * exceed the limit, it is ignored.
 * All current faults can be cleared using a layout of "0".
 *
 * Requests are always sent to the device.  If they are to fail,
 * we clone the bio and insert a new bi_end_io into the chain.
 */
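
/*
 * Illustrative example (not part of the original source): to arm the
 * ReadTransient mode (mode 1) so that it recurs once every 10 requests,
 * the array layout would be encoded as
 *
 *	(10 << ModeShift) | ReadTransient  ==  (10 << 5) | 1  ==  321
 *
 * A period of 0 in the upper bits makes the requested fault one-shot.
 */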
     37
     38#define	WriteTransient	0
     39#define	ReadTransient	1
     40#define	WritePersistent	2
     41#define	ReadPersistent	3
     42#define	WriteAll	4 /* doesn't go to device */
     43#define	ReadFixable	5
     44#define	Modes	6
     45
     46#define	ClearErrors	31
     47#define	ClearFaults	30
     48
     49#define AllPersist	100 /* internal use only */
     50#define	NoPersist	101
     51
     52#define	ModeMask	0x1f
     53#define	ModeShift	5
     54
     55#define MaxFault	50
     56#include <linux/blkdev.h>
     57#include <linux/module.h>
     58#include <linux/raid/md_u.h>
     59#include <linux/slab.h>
     60#include "md.h"
     61#include <linux/seq_file.h>
     62
     63
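/*
 * Completion handler for the clone used to inject a failure: copy the
 * iterator back to the original bio, drop the clone and complete the
 * original with an I/O error.
 */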
static void faulty_fail(struct bio *bio)
{
	struct bio *b = bio->bi_private;

	b->bi_iter.bi_size = bio->bi_iter.bi_size;
	b->bi_iter.bi_sector = bio->bi_iter.bi_sector;

	bio_put(bio);

	bio_io_error(b);
}

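/* Per-array state: per-mode periods/counters and the table of sticky faults. */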
struct faulty_conf {
	int period[Modes];
	atomic_t counters[Modes];
	sector_t faults[MaxFault];
	int	modes[MaxFault];
	int nfaults;
	struct md_rdev *rdev;
};

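/*
 * Decrement the counter for @mode and return 1 when a fault should be
 * injected.  For recurring modes the counter is reloaded from the period
 * once it reaches zero.
 */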
static int check_mode(struct faulty_conf *conf, int mode)
{
	if (conf->period[mode] == 0 &&
	    atomic_read(&conf->counters[mode]) <= 0)
		return 0; /* no failure, no decrement */

	if (atomic_dec_and_test(&conf->counters[mode])) {
		if (conf->period[mode])
			atomic_set(&conf->counters[mode], conf->period[mode]);
		return 1;
	}
	return 0;
}

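/*
 * Return 1 when a recorded persistent fault covers any sector in
 * [start, end) for the given direction.  A ReadFixable fault is cleared
 * (downgraded to NoPersist) as soon as the sector is written.
 */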
static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end, int dir)
{
	/* If we find a ReadFixable sector, we fix it ... */
	int i;
	for (i=0; i<conf->nfaults; i++)
		if (conf->faults[i] >= start &&
		    conf->faults[i] < end) {
			/* found it ... */
			switch (conf->modes[i] * 2 + dir) {
			case WritePersistent*2+WRITE: return 1;
			case ReadPersistent*2+READ: return 1;
			case ReadFixable*2+READ: return 1;
			case ReadFixable*2+WRITE:
				conf->modes[i] = NoPersist;
				return 0;
			case AllPersist*2+READ:
			case AllPersist*2+WRITE: return 1;
			default:
				return 0;
			}
		}
	return 0;
}

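/*
 * Record a persistent fault for @start.  If the sector is already in the
 * table, the new mode is merged with the existing one (read and write
 * faults combine into AllPersist); otherwise a free or new slot is used,
 * unless the MaxFault limit has been reached.
 */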
static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
{
	int i;
	int n = conf->nfaults;
	for (i=0; i<conf->nfaults; i++)
		if (conf->faults[i] == start) {
			switch(mode) {
			case NoPersist: conf->modes[i] = mode; return;
			case WritePersistent:
				if (conf->modes[i] == ReadPersistent ||
				    conf->modes[i] == ReadFixable)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = WritePersistent;
				return;
			case ReadPersistent:
				if (conf->modes[i] == WritePersistent)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = ReadPersistent;
				return;
			case ReadFixable:
				if (conf->modes[i] == WritePersistent ||
				    conf->modes[i] == ReadPersistent)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = ReadFixable;
				return;
			}
		} else if (conf->modes[i] == NoPersist)
			n = i;

	if (n >= MaxFault)
		return;
	conf->faults[n] = start;
	conf->modes[n] = mode;
	if (conf->nfaults == n)
		conf->nfaults = n+1;
}

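/*
 * Decide, per request, whether a fault should be injected.  Failing
 * requests are still submitted to the backing device, but through a
 * clone whose completion handler errors out the original bio.
 */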
static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
{
	struct faulty_conf *conf = mddev->private;
	int failit = 0;

	if (bio_data_dir(bio) == WRITE) {
		/* write request */
		if (atomic_read(&conf->counters[WriteAll])) {
			/* special case - don't decrement, don't submit_bio_noacct,
			 * just fail immediately
			 */
			bio_io_error(bio);
			return true;
		}

		if (check_sector(conf, bio->bi_iter.bi_sector,
				 bio_end_sector(bio), WRITE))
			failit = 1;
		if (check_mode(conf, WritePersistent)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   WritePersistent);
			failit = 1;
		}
		if (check_mode(conf, WriteTransient))
			failit = 1;
	} else {
		/* read request */
		if (check_sector(conf, bio->bi_iter.bi_sector,
				 bio_end_sector(bio), READ))
			failit = 1;
		if (check_mode(conf, ReadTransient))
			failit = 1;
		if (check_mode(conf, ReadPersistent)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   ReadPersistent);
			failit = 1;
		}
		if (check_mode(conf, ReadFixable)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   ReadFixable);
			failit = 1;
		}
	}
	if (failit) {
		struct bio *b = bio_alloc_clone(conf->rdev->bdev, bio, GFP_NOIO,
						&mddev->bio_set);

		b->bi_private = bio;
		b->bi_end_io = faulty_fail;
		bio = b;
	} else
		bio_set_dev(bio, conf->rdev->bdev);

	submit_bio_noacct(bio);
	return true;
}

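/* Report the currently armed fault modes and counters, e.g. in /proc/mdstat. */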
static void faulty_status(struct seq_file *seq, struct mddev *mddev)
{
	struct faulty_conf *conf = mddev->private;
	int n;

	if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
		seq_printf(seq, " WriteTransient=%d(%d)",
			   n, conf->period[WriteTransient]);

	if ((n=atomic_read(&conf->counters[ReadTransient])) != 0)
		seq_printf(seq, " ReadTransient=%d(%d)",
			   n, conf->period[ReadTransient]);

	if ((n=atomic_read(&conf->counters[WritePersistent])) != 0)
		seq_printf(seq, " WritePersistent=%d(%d)",
			   n, conf->period[WritePersistent]);

	if ((n=atomic_read(&conf->counters[ReadPersistent])) != 0)
		seq_printf(seq, " ReadPersistent=%d(%d)",
			   n, conf->period[ReadPersistent]);

	if ((n=atomic_read(&conf->counters[ReadFixable])) != 0)
		seq_printf(seq, " ReadFixable=%d(%d)",
			   n, conf->period[ReadFixable]);

	if ((n=atomic_read(&conf->counters[WriteAll])) != 0)
		seq_printf(seq, " WriteAll");

	seq_printf(seq, " nfaults=%d", conf->nfaults);
}

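/*
 * Apply a new "layout": the low ModeShift bits select a fault mode (or
 * the ClearFaults/ClearErrors commands), the remaining bits give the
 * period.
 */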
static int faulty_reshape(struct mddev *mddev)
{
	int mode = mddev->new_layout & ModeMask;
	int count = mddev->new_layout >> ModeShift;
	struct faulty_conf *conf = mddev->private;

	if (mddev->new_layout < 0)
		return 0;

	/* new layout */
	if (mode == ClearFaults)
		conf->nfaults = 0;
	else if (mode == ClearErrors) {
		int i;
		for (i=0 ; i < Modes ; i++) {
			conf->period[i] = 0;
			atomic_set(&conf->counters[i], 0);
		}
	} else if (mode < Modes) {
		conf->period[mode] = count;
		if (!count) count++;
		atomic_set(&conf->counters[mode], count);
	} else
		return -EINVAL;
	mddev->new_layout = -1;
	mddev->layout = -1; /* makes sure further changes come through */
	return 0;
}

static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	WARN_ONCE(raid_disks,
		  "%s does not support generic reshape\n", __func__);

	if (sectors == 0)
		return mddev->dev_sectors;

	return sectors;
}

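/* Set up the per-array state, record the backing rdev and apply the initial layout. */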
static int faulty_run(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int i;
	struct faulty_conf *conf;

	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	conf = kmalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return -ENOMEM;

	for (i=0; i<Modes; i++) {
		atomic_set(&conf->counters[i], 0);
		conf->period[i] = 0;
	}
	conf->nfaults = 0;

	rdev_for_each(rdev, mddev) {
		conf->rdev = rdev;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
	}

	md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
	mddev->private = conf;

	faulty_reshape(mddev);

	return 0;
}

static void faulty_free(struct mddev *mddev, void *priv)
{
	struct faulty_conf *conf = priv;

	kfree(conf);
}

static struct md_personality faulty_personality =
{
	.name		= "faulty",
	.level		= LEVEL_FAULTY,
	.owner		= THIS_MODULE,
	.make_request	= faulty_make_request,
	.run		= faulty_run,
	.free		= faulty_free,
	.status		= faulty_status,
	.check_reshape	= faulty_reshape,
	.size		= faulty_size,
};

static int __init raid_init(void)
{
	return register_md_personality(&faulty_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&faulty_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fault injection personality for MD (deprecated)");
MODULE_ALIAS("md-personality-10"); /* faulty */
MODULE_ALIAS("md-faulty");
MODULE_ALIAS("md-level--5");