cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mtd_blkdevs.c (12462B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

#include "mtdcore.h"

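/* All registered translation layers, one entry per block device major. */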
static LIST_HEAD(blktrans_majors);

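/*
 * Final kref release: runs once the last reference (opener or removal
 * path) is dropped.  Tears down the gendisk and tag set and frees the
 * device.
 */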
static void blktrans_dev_release(struct kref *kref)
{
	struct mtd_blktrans_dev *dev =
		container_of(kref, struct mtd_blktrans_dev, ref);

	blk_cleanup_disk(dev->disk);
	blk_mq_free_tag_set(dev->tag_set);
	kfree(dev->tag_set);
	list_del(&dev->list);
	kfree(dev);
}

static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
	kref_put(&dev->ref, blktrans_dev_release);
}


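/*
 * Translate one request segment into translation-layer block operations.
 * blk_rq_pos() is in 512-byte sectors, so "<< 9" converts to bytes and
 * ">> tr->blkshift" to blocks of tr->blksize bytes.
 */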
static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned long block, nsect;
	char *buf;

	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

	switch (req_op(req)) {
	case REQ_OP_FLUSH:
		if (tr->flush(dev))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	case REQ_OP_DISCARD:
		if (tr->discard(dev, block, nsect))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	case REQ_OP_READ:
		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
			if (tr->readsect(dev, block, buf)) {
				kunmap(bio_page(req->bio));
				return BLK_STS_IOERR;
			}
		}
		kunmap(bio_page(req->bio));

		rq_for_each_segment(bvec, req, iter)
			flush_dcache_page(bvec.bv_page);
		return BLK_STS_OK;
	case REQ_OP_WRITE:
		if (!tr->writesect)
			return BLK_STS_IOERR;

		rq_for_each_segment(bvec, req, iter)
			flush_dcache_page(bvec.bv_page);

		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
			if (tr->writesect(dev, block, buf)) {
				kunmap(bio_page(req->bio));
				return BLK_STS_IOERR;
			}
		}
		kunmap(bio_page(req->bio));
		return BLK_STS_OK;
	default:
		return BLK_STS_IOERR;
	}
}

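/*
 * Polled by a translation layer's ->background() callback to check
 * whether it should return so queued requests can be serviced.
 */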
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
	return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);

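/* Pop the next request off the device's private list and start it. */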
static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
{
	struct request *rq;

	rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
	if (rq) {
		list_del_init(&rq->queuelist);
		blk_mq_start_request(rq);
		return rq;
	}

	return NULL;
}

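/*
 * Drain the request list.  Entered and exited with queue_lock held, but
 * the lock is dropped around the actual MTD I/O (which sleeps under
 * dev->lock).  When the list runs dry, the translation layer gets one
 * ->background() slot per idle period.
 */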
static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
	__releases(&dev->queue_lock)
	__acquires(&dev->queue_lock)
{
	struct mtd_blktrans_ops *tr = dev->tr;
	struct request *req = NULL;
	int background_done = 0;

	while (1) {
		blk_status_t res;

		dev->bg_stop = false;
		if (!req && !(req = mtd_next_request(dev))) {
			if (tr->background && !background_done) {
				spin_unlock_irq(&dev->queue_lock);
				mutex_lock(&dev->lock);
				tr->background(dev);
				mutex_unlock(&dev->lock);
				spin_lock_irq(&dev->queue_lock);
				/*
				 * Do background processing just once per idle
				 * period.
				 */
				background_done = !dev->bg_stop;
				continue;
			}
			break;
		}

		spin_unlock_irq(&dev->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
			__blk_mq_end_request(req, res);
			req = NULL;
		}

		background_done = 0;
		cond_resched();
		spin_lock_irq(&dev->queue_lock);
	}
}

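/*
 * blk-mq ->queue_rq(): append the request to the private list and drain
 * it synchronously.  The tag set is created with BLK_MQ_F_BLOCKING, so
 * sleeping here is permitted.
 */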
static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct mtd_blktrans_dev *dev;

	dev = hctx->queue->queuedata;
	if (!dev) {
		blk_mq_start_request(bd->rq);
		return BLK_STS_IOERR;
	}

	spin_lock_irq(&dev->queue_lock);
	list_add_tail(&bd->rq->queuelist, &dev->rq_list);
	mtd_blktrans_work(dev);
	spin_unlock_irq(&dev->queue_lock);

	return BLK_STS_OK;
}

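/*
 * Only the first opener pins the translation-layer module and the
 * underlying MTD device; later opens just bump the open count.
 */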
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
	int ret = 0;

	kref_get(&dev->ref);

	mutex_lock(&dev->lock);

	if (dev->open)
		goto unlock;

	__module_get(dev->tr->owner);

	if (!dev->mtd)
		goto unlock;

	if (dev->tr->open) {
		ret = dev->tr->open(dev);
		if (ret)
			goto error_put;
	}

	ret = __get_mtd_device(dev->mtd);
	if (ret)
		goto error_release;
	dev->file_mode = mode;

unlock:
	dev->open++;
	mutex_unlock(&dev->lock);
	return ret;

error_release:
	if (dev->tr->release)
		dev->tr->release(dev);
error_put:
	module_put(dev->tr->owner);
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

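/* Last close: release the translation layer and drop the MTD device. */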
static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = disk->private_data;

	mutex_lock(&dev->lock);

	if (--dev->open)
		goto unlock;

	module_put(dev->tr->owner);

	if (dev->mtd) {
		if (dev->tr->release)
			dev->tr->release(dev);
		__put_mtd_device(dev->mtd);
	}
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
	int ret = -ENXIO;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
unlock:
	mutex_unlock(&dev->lock);
	return ret;
}

static const struct block_device_operations mtd_block_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.getgeo		= blktrans_getgeo,
};

static const struct blk_mq_ops mtd_mq_ops = {
	.queue_rq	= mtd_queue_rq,
};

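/*
 * Register a new device under its translation layer: pick (or validate)
 * a devnum, allocate a single-queue blk-mq tag set and gendisk, size the
 * queue, and publish the disk.  Must be called with mtd_table_mutex held.
 */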
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;
	int ret;

	lockdep_assert_held(&mtd_table_mutex);

	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}

	ret = -EBUSY;
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Check that the device and any partitions will get valid
	 * minor numbers and that the disk naming code below can cope
	 * with this number. */
	if (new->devnum > (MINORMASK >> tr->part_bits) ||
	    (tr->part_bits && new->devnum >= 27 * 26))
		return ret;

	list_add_tail(&new->list, &tr->devs);
 added:

	mutex_init(&new->lock);
	kref_init(&new->ref);
	if (!tr->writesect)
		new->readonly = 1;

	ret = -ENOMEM;
	new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
	if (!new->tag_set)
		goto out_list_del;

	ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
			BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
	if (ret)
		goto out_kfree_tag_set;

	/* Create gendisk */
	gd = blk_mq_alloc_disk(new->tag_set, new);
	if (IS_ERR(gd)) {
		ret = PTR_ERR(gd);
		goto out_free_tag_set;
	}

	new->disk = gd;
	new->rq = new->disk->queue;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->minors = 1 << tr->part_bits;
	gd->fops = &mtd_block_ops;

	if (tr->part_bits) {
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	} else {
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);
		gd->flags |= GENHD_FL_NO_PART;
	}

	set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);

	/* Create the request queue */
	spin_lock_init(&new->queue_lock);
	INIT_LIST_HEAD(&new->rq_list);

	if (tr->flush)
		blk_queue_write_cache(new->rq, true, false);

	blk_queue_logical_block_size(new->rq, tr->blksize);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);

	if (tr->discard) {
		blk_queue_max_discard_sectors(new->rq, UINT_MAX);
		new->rq->limits.discard_granularity = tr->blksize;
	}

	gd->queue = new->rq;

	if (new->readonly)
		set_disk_ro(gd, 1);

	ret = device_add_disk(&new->mtd->dev, gd, NULL);
	if (ret)
		goto out_cleanup_disk;

	if (new->disk_attributes) {
		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
					new->disk_attributes);
		WARN_ON(ret);
	}
	return 0;

out_cleanup_disk:
	blk_cleanup_disk(new->disk);
out_free_tag_set:
	blk_mq_free_tag_set(new->tag_set);
out_kfree_tag_set:
	kfree(new->tag_set);
out_list_del:
	list_del(&new->list);
	return ret;
}

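/*
 * Tear down a device: unregister the disk so no new requests arrive,
 * fail anything still queued (queuedata is cleared under queue_lock),
 * then close and drop the MTD device if it was open.
 */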
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	unsigned long flags;

	lockdep_assert_held(&mtd_table_mutex);

	if (old->disk_attributes)
		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
						old->disk_attributes);

	/* Stop new requests from arriving */
	del_gendisk(old->disk);

	/* Kill current requests */
	spin_lock_irqsave(&old->queue_lock, flags);
	old->rq->queuedata = NULL;
	spin_unlock_irqrestore(&old->queue_lock, flags);

	/* freeze+quiesce queue to ensure all requests are flushed */
	blk_mq_freeze_queue(old->rq);
	blk_mq_quiesce_queue(old->rq);
	blk_mq_unquiesce_queue(old->rq);
	blk_mq_unfreeze_queue(old->rq);

	/*
	 * If the device is currently open, tell the trans driver to close it,
	 * then put the mtd device, and don't touch it again.
	 */
	mutex_lock(&old->lock);
	if (old->open) {
		if (old->tr->release)
			old->tr->release(old);
		__put_mtd_device(old->mtd);
	}

	old->mtd = NULL;

	mutex_unlock(&old->lock);
	blktrans_dev_put(old);
	return 0;
}

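/*
 * MTD notifier glue: as MTD devices come and go, give every registered
 * translation layer the chance to create or remove its block devices.
 */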
static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

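/*
 * Registration entry point for a translation layer.  A minimal caller
 * sketch (driver names are hypothetical; every field shown is one this
 * file itself uses):
 *
 *	static struct mtd_blktrans_ops my_tr = {
 *		.name		= "myblk",
 *		.major		= 0,	(0 lets register_blkdev() pick one)
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.add_mtd	= my_add_mtd,
 *		.remove_dev	= my_remove_dev,
 *		.readsect	= my_readsect,
 *		.owner		= THIS_MODULE,
 *	};
 *	...
 *	register_mtd_blktrans(&my_tr);
 *
 * ->add_mtd() is then invoked once for every MTD device already present,
 * and the notifier above keeps that in sync afterwards.
 */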
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_info *mtd;
	int ret;

	/* Register the notifier if/when the first device type is
	   registered, to prevent link/init ordering from breaking
	   things. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	ret = register_blkdev(tr->major, tr->name);
	if (ret < 0) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		return ret;
	}

	if (ret)
		tr->major = ret;

	tr->blkshift = ffs(tr->blksize) - 1;

	INIT_LIST_HEAD(&tr->devs);

	mutex_lock(&mtd_table_mutex);
	list_add(&tr->list, &blktrans_majors);
	mtd_for_each_device(mtd)
		if (mtd->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	mutex_unlock(&mtd_table_mutex);
	unregister_blkdev(tr->major, tr->name);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");