cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

blk-mq-sysfs.c (7168B)


// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

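/*
 * Release the queue's mq kobject: free the per-CPU software queue
 * contexts and the blk_mq_ctxs container that embeds the kobject.
 */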
static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

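/* Last reference to a software context: drop its hold on the parent. */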
static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

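/*
 * Release a hardware context: free its flush queue, ctx map, CPU mask,
 * software context array, and finally the hctx itself.
 */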
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

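/* A hardware-context sysfs attribute with typed show/store callbacks. */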
struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

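/*
 * Generic show dispatcher: recover the typed entry and hctx from the
 * attribute and kobject, then call the entry's show hook under
 * q->sysfs_lock.
 */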
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

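/* Store counterpart of the above: same dispatch and locking scheme. */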
static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

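/* Tag counts are read straight out of the hctx's tag set. */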
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

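/*
 * Print the CPUs mapped to this hctx as a comma-separated list,
 * truncating rather than overflowing once a page worth of output
 * (minus room for the trailing newline) has been produced.
 */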
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

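/* The read-only attributes exposed in every hctx sysfs directory. */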
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

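/*
 * One kobject type per level of the hierarchy: the mq directory, the
 * per-CPU software contexts, and the hardware contexts (the only level
 * that carries attributes).
 */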
static struct kobj_type blk_mq_ktype = {
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.release	= blk_mq_ctx_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_groups = default_hw_ctx_groups,
	.release	= blk_mq_hw_sysfs_release,
};

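/* Remove a hctx directory together with its per-CPU ctx children. */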
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

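/*
 * Add a hctx directory named after the queue number, plus one "cpu%u"
 * child per software context mapped to this hctx.
 */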
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

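/*
 * Tear down the whole mq sysfs hierarchy and drop the device reference
 * taken in __blk_mq_register_dev().
 */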
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	lockdep_assert_held(&q->sysfs_dir_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

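/* Drop the references taken on the ctx and mq kobjects at init time. */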
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}

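/*
 * Initialise the mq kobject and one ctx kobject per possible CPU; each
 * ctx pins the mq kobject so it outlives every software context.
 */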
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

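/*
 * Register the "mq" directory under the device kobject (taking a
 * reference on it) and a subdirectory per hardware context; on failure,
 * unwind whatever was registered so far.
 */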
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i, j;
	int ret;

	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_dir_lock);

	ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	queue_for_each_hw_ctx(q, hctx, j) {
		if (j < i)
			blk_mq_unregister_hctx(hctx);
	}

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
}

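/* Remove all hctx directories while the mq directory stays registered. */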
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_dir_lock);
}

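/* Re-add every hctx directory, stopping at the first failure. */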
int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	int ret = 0;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	return ret;
}