cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

blk-mq-debugfs.c (22117B)
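
This file implements the debugfs attributes that blk-mq exposes for request queues, hardware contexts (hctx) and per-CPU software contexts (ctx). I/O schedulers and rq-qos policies hook into the same machinery by exporting tables of struct blk_mq_debugfs_attr, which blk_mq_debugfs_register_sched() and blk_mq_debugfs_register_rqos() in the listing turn into debugfs files. The sketch below is not part of the file; it is a minimal illustration, assuming a hypothetical scheduler attribute living in block/ next to this code, of what such a table looks like. The struct layout ({name, mode, show, write/seq_ops}) and the fact that a queue-level show callback receives the request_queue as its data pointer are taken from the listing itself.

#include <linux/blkdev.h>
#include <linux/seq_file.h>

#include "blk-mq-debugfs.h"

/* Hypothetical queue-level attribute: print the queue depth, one value per read. */
static int example_nr_requests_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;	/* passed as 'data' by blk_mq_debugfs_register_sched() */

	seq_printf(m, "%lu\n", q->nr_requests);
	return 0;
}

/* Table an elevator_type can point its ->queue_debugfs_attrs at. */
static const struct blk_mq_debugfs_attr example_queue_debugfs_attrs[] = {
	{"nr_requests", 0400, example_nr_requests_show},
	{},	/* sentinel: debugfs_create_files() stops at a NULL name */
};

With such a table in place, blk_mq_debugfs_register_sched() creates one file per entry under the queue's sched/ directory once the queue's debugfs directory exists, which is why blk_mq_debugfs_register() retries the registration after the queue has been attached to a gendisk.
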


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (C) 2017 Facebook
      4 */
      5
      6#include <linux/kernel.h>
      7#include <linux/blkdev.h>
      8#include <linux/debugfs.h>
      9
     10#include <linux/blk-mq.h>
     11#include "blk.h"
     12#include "blk-mq.h"
     13#include "blk-mq-debugfs.h"
     14#include "blk-mq-sched.h"
     15#include "blk-mq-tag.h"
     16#include "blk-rq-qos.h"
     17
     18static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
     19{
     20	if (stat->nr_samples) {
     21		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
     22			   stat->nr_samples, stat->mean, stat->min, stat->max);
     23	} else {
     24		seq_puts(m, "samples=0");
     25	}
     26}
     27
     28static int queue_poll_stat_show(void *data, struct seq_file *m)
     29{
     30	struct request_queue *q = data;
     31	int bucket;
     32
     33	if (!q->poll_stat)
     34		return 0;
     35
     36	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
     37		seq_printf(m, "read  (%d Bytes): ", 1 << (9 + bucket));
     38		print_stat(m, &q->poll_stat[2 * bucket]);
     39		seq_puts(m, "\n");
     40
     41		seq_printf(m, "write (%d Bytes): ",  1 << (9 + bucket));
     42		print_stat(m, &q->poll_stat[2 * bucket + 1]);
     43		seq_puts(m, "\n");
     44	}
     45	return 0;
     46}
     47
     48static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
     49	__acquires(&q->requeue_lock)
     50{
     51	struct request_queue *q = m->private;
     52
     53	spin_lock_irq(&q->requeue_lock);
     54	return seq_list_start(&q->requeue_list, *pos);
     55}
     56
     57static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
     58{
     59	struct request_queue *q = m->private;
     60
     61	return seq_list_next(v, &q->requeue_list, pos);
     62}
     63
     64static void queue_requeue_list_stop(struct seq_file *m, void *v)
     65	__releases(&q->requeue_lock)
     66{
     67	struct request_queue *q = m->private;
     68
     69	spin_unlock_irq(&q->requeue_lock);
     70}
     71
     72static const struct seq_operations queue_requeue_list_seq_ops = {
     73	.start	= queue_requeue_list_start,
     74	.next	= queue_requeue_list_next,
     75	.stop	= queue_requeue_list_stop,
     76	.show	= blk_mq_debugfs_rq_show,
     77};
     78
     79static int blk_flags_show(struct seq_file *m, const unsigned long flags,
     80			  const char *const *flag_name, int flag_name_count)
     81{
     82	bool sep = false;
     83	int i;
     84
     85	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
     86		if (!(flags & BIT(i)))
     87			continue;
     88		if (sep)
     89			seq_puts(m, "|");
     90		sep = true;
     91		if (i < flag_name_count && flag_name[i])
     92			seq_puts(m, flag_name[i]);
     93		else
     94			seq_printf(m, "%d", i);
     95	}
     96	return 0;
     97}
     98
     99static int queue_pm_only_show(void *data, struct seq_file *m)
    100{
    101	struct request_queue *q = data;
    102
    103	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
    104	return 0;
    105}
    106
    107#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
    108static const char *const blk_queue_flag_name[] = {
    109	QUEUE_FLAG_NAME(STOPPED),
    110	QUEUE_FLAG_NAME(DYING),
    111	QUEUE_FLAG_NAME(NOMERGES),
    112	QUEUE_FLAG_NAME(SAME_COMP),
    113	QUEUE_FLAG_NAME(FAIL_IO),
    114	QUEUE_FLAG_NAME(NONROT),
    115	QUEUE_FLAG_NAME(IO_STAT),
    116	QUEUE_FLAG_NAME(NOXMERGES),
    117	QUEUE_FLAG_NAME(ADD_RANDOM),
    118	QUEUE_FLAG_NAME(SAME_FORCE),
    119	QUEUE_FLAG_NAME(DEAD),
    120	QUEUE_FLAG_NAME(INIT_DONE),
    121	QUEUE_FLAG_NAME(STABLE_WRITES),
    122	QUEUE_FLAG_NAME(POLL),
    123	QUEUE_FLAG_NAME(WC),
    124	QUEUE_FLAG_NAME(FUA),
    125	QUEUE_FLAG_NAME(DAX),
    126	QUEUE_FLAG_NAME(STATS),
    127	QUEUE_FLAG_NAME(REGISTERED),
    128	QUEUE_FLAG_NAME(QUIESCED),
    129	QUEUE_FLAG_NAME(PCI_P2PDMA),
    130	QUEUE_FLAG_NAME(ZONE_RESETALL),
    131	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
    132	QUEUE_FLAG_NAME(HCTX_ACTIVE),
    133	QUEUE_FLAG_NAME(NOWAIT),
    134};
    135#undef QUEUE_FLAG_NAME
    136
    137static int queue_state_show(void *data, struct seq_file *m)
    138{
    139	struct request_queue *q = data;
    140
    141	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
    142		       ARRAY_SIZE(blk_queue_flag_name));
    143	seq_puts(m, "\n");
    144	return 0;
    145}
    146
    147static ssize_t queue_state_write(void *data, const char __user *buf,
    148				 size_t count, loff_t *ppos)
    149{
    150	struct request_queue *q = data;
    151	char opbuf[16] = { }, *op;
    152
    153	/*
    154	 * The "state" attribute is removed after blk_cleanup_queue() has called
    155	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
    156	 * triggering a use-after-free.
    157	 */
    158	if (blk_queue_dead(q))
    159		return -ENOENT;
    160
    161	if (count >= sizeof(opbuf)) {
    162		pr_err("%s: operation too long\n", __func__);
    163		goto inval;
    164	}
    165
    166	if (copy_from_user(opbuf, buf, count))
    167		return -EFAULT;
    168	op = strstrip(opbuf);
    169	if (strcmp(op, "run") == 0) {
    170		blk_mq_run_hw_queues(q, true);
    171	} else if (strcmp(op, "start") == 0) {
    172		blk_mq_start_stopped_hw_queues(q, true);
    173	} else if (strcmp(op, "kick") == 0) {
    174		blk_mq_kick_requeue_list(q);
    175	} else {
    176		pr_err("%s: unsupported operation '%s'\n", __func__, op);
    177inval:
    178		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
    179		return -EINVAL;
    180	}
    181	return count;
    182}
    183
    184static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
    185	{ "poll_stat", 0400, queue_poll_stat_show },
    186	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
    187	{ "pm_only", 0600, queue_pm_only_show, NULL },
    188	{ "state", 0600, queue_state_show, queue_state_write },
    189	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
    190	{ },
    191};
    192
    193#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
    194static const char *const hctx_state_name[] = {
    195	HCTX_STATE_NAME(STOPPED),
    196	HCTX_STATE_NAME(TAG_ACTIVE),
    197	HCTX_STATE_NAME(SCHED_RESTART),
    198	HCTX_STATE_NAME(INACTIVE),
    199};
    200#undef HCTX_STATE_NAME
    201
    202static int hctx_state_show(void *data, struct seq_file *m)
    203{
    204	struct blk_mq_hw_ctx *hctx = data;
    205
    206	blk_flags_show(m, hctx->state, hctx_state_name,
    207		       ARRAY_SIZE(hctx_state_name));
    208	seq_puts(m, "\n");
    209	return 0;
    210}
    211
    212#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
    213static const char *const alloc_policy_name[] = {
    214	BLK_TAG_ALLOC_NAME(FIFO),
    215	BLK_TAG_ALLOC_NAME(RR),
    216};
    217#undef BLK_TAG_ALLOC_NAME
    218
    219#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
    220static const char *const hctx_flag_name[] = {
    221	HCTX_FLAG_NAME(SHOULD_MERGE),
    222	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
    223	HCTX_FLAG_NAME(BLOCKING),
    224	HCTX_FLAG_NAME(NO_SCHED),
    225	HCTX_FLAG_NAME(STACKING),
    226	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
    227};
    228#undef HCTX_FLAG_NAME
    229
    230static int hctx_flags_show(void *data, struct seq_file *m)
    231{
    232	struct blk_mq_hw_ctx *hctx = data;
    233	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);
    234
    235	seq_puts(m, "alloc_policy=");
    236	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
    237	    alloc_policy_name[alloc_policy])
    238		seq_puts(m, alloc_policy_name[alloc_policy]);
    239	else
    240		seq_printf(m, "%d", alloc_policy);
    241	seq_puts(m, " ");
    242	blk_flags_show(m,
    243		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
    244		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
    245	seq_puts(m, "\n");
    246	return 0;
    247}
    248
    249#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
    250static const char *const cmd_flag_name[] = {
    251	CMD_FLAG_NAME(FAILFAST_DEV),
    252	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
    253	CMD_FLAG_NAME(FAILFAST_DRIVER),
    254	CMD_FLAG_NAME(SYNC),
    255	CMD_FLAG_NAME(META),
    256	CMD_FLAG_NAME(PRIO),
    257	CMD_FLAG_NAME(NOMERGE),
    258	CMD_FLAG_NAME(IDLE),
    259	CMD_FLAG_NAME(INTEGRITY),
    260	CMD_FLAG_NAME(FUA),
    261	CMD_FLAG_NAME(PREFLUSH),
    262	CMD_FLAG_NAME(RAHEAD),
    263	CMD_FLAG_NAME(BACKGROUND),
    264	CMD_FLAG_NAME(NOWAIT),
    265	CMD_FLAG_NAME(NOUNMAP),
    266	CMD_FLAG_NAME(POLLED),
    267};
    268#undef CMD_FLAG_NAME
    269
    270#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
    271static const char *const rqf_name[] = {
    272	RQF_NAME(STARTED),
    273	RQF_NAME(SOFTBARRIER),
    274	RQF_NAME(FLUSH_SEQ),
    275	RQF_NAME(MIXED_MERGE),
    276	RQF_NAME(MQ_INFLIGHT),
    277	RQF_NAME(DONTPREP),
    278	RQF_NAME(FAILED),
    279	RQF_NAME(QUIET),
    280	RQF_NAME(ELVPRIV),
    281	RQF_NAME(IO_STAT),
    282	RQF_NAME(PM),
    283	RQF_NAME(HASHED),
    284	RQF_NAME(STATS),
    285	RQF_NAME(SPECIAL_PAYLOAD),
    286	RQF_NAME(ZONE_WRITE_LOCKED),
    287	RQF_NAME(MQ_POLL_SLEPT),
    288	RQF_NAME(ELV),
    289};
    290#undef RQF_NAME
    291
    292static const char *const blk_mq_rq_state_name_array[] = {
    293	[MQ_RQ_IDLE]		= "idle",
    294	[MQ_RQ_IN_FLIGHT]	= "in_flight",
    295	[MQ_RQ_COMPLETE]	= "complete",
    296};
    297
    298static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
    299{
    300	if (WARN_ON_ONCE((unsigned int)rq_state >=
    301			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
    302		return "(?)";
    303	return blk_mq_rq_state_name_array[rq_state];
    304}
    305
    306int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
    307{
    308	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
    309	const unsigned int op = req_op(rq);
    310	const char *op_str = blk_op_str(op);
    311
    312	seq_printf(m, "%p {.op=", rq);
    313	if (strcmp(op_str, "UNKNOWN") == 0)
    314		seq_printf(m, "%u", op);
    315	else
    316		seq_printf(m, "%s", op_str);
    317	seq_puts(m, ", .cmd_flags=");
    318	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
    319		       ARRAY_SIZE(cmd_flag_name));
    320	seq_puts(m, ", .rq_flags=");
    321	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
    322		       ARRAY_SIZE(rqf_name));
    323	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
    324	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
    325		   rq->internal_tag);
    326	if (mq_ops->show_rq)
    327		mq_ops->show_rq(m, rq);
    328	seq_puts(m, "}\n");
    329	return 0;
    330}
    331EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);
    332
    333int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
    334{
    335	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
    336}
    337EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
    338
    339static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
    340	__acquires(&hctx->lock)
    341{
    342	struct blk_mq_hw_ctx *hctx = m->private;
    343
    344	spin_lock(&hctx->lock);
    345	return seq_list_start(&hctx->dispatch, *pos);
    346}
    347
    348static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
    349{
    350	struct blk_mq_hw_ctx *hctx = m->private;
    351
    352	return seq_list_next(v, &hctx->dispatch, pos);
    353}
    354
    355static void hctx_dispatch_stop(struct seq_file *m, void *v)
    356	__releases(&hctx->lock)
    357{
    358	struct blk_mq_hw_ctx *hctx = m->private;
    359
    360	spin_unlock(&hctx->lock);
    361}
    362
    363static const struct seq_operations hctx_dispatch_seq_ops = {
    364	.start	= hctx_dispatch_start,
    365	.next	= hctx_dispatch_next,
    366	.stop	= hctx_dispatch_stop,
    367	.show	= blk_mq_debugfs_rq_show,
    368};
    369
    370struct show_busy_params {
    371	struct seq_file		*m;
    372	struct blk_mq_hw_ctx	*hctx;
    373};
    374
    375/*
    376 * Note: the state of a request may change while this function is in progress,
    377 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
    378 * keep iterating requests.
    379 */
    380static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
    381{
    382	const struct show_busy_params *params = data;
    383
    384	if (rq->mq_hctx == params->hctx)
    385		__blk_mq_debugfs_rq_show(params->m, rq);
    386
    387	return true;
    388}
    389
    390static int hctx_busy_show(void *data, struct seq_file *m)
    391{
    392	struct blk_mq_hw_ctx *hctx = data;
    393	struct show_busy_params params = { .m = m, .hctx = hctx };
    394
    395	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
    396				&params);
    397
    398	return 0;
    399}
    400
    401static const char *const hctx_types[] = {
    402	[HCTX_TYPE_DEFAULT]	= "default",
    403	[HCTX_TYPE_READ]	= "read",
    404	[HCTX_TYPE_POLL]	= "poll",
    405};
    406
    407static int hctx_type_show(void *data, struct seq_file *m)
    408{
    409	struct blk_mq_hw_ctx *hctx = data;
    410
    411	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
    412	seq_printf(m, "%s\n", hctx_types[hctx->type]);
    413	return 0;
    414}
    415
    416static int hctx_ctx_map_show(void *data, struct seq_file *m)
    417{
    418	struct blk_mq_hw_ctx *hctx = data;
    419
    420	sbitmap_bitmap_show(&hctx->ctx_map, m);
    421	return 0;
    422}
    423
    424static void blk_mq_debugfs_tags_show(struct seq_file *m,
    425				     struct blk_mq_tags *tags)
    426{
    427	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
    428	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
    429	seq_printf(m, "active_queues=%d\n",
    430		   atomic_read(&tags->active_queues));
    431
    432	seq_puts(m, "\nbitmap_tags:\n");
    433	sbitmap_queue_show(&tags->bitmap_tags, m);
    434
    435	if (tags->nr_reserved_tags) {
    436		seq_puts(m, "\nbreserved_tags:\n");
    437		sbitmap_queue_show(&tags->breserved_tags, m);
    438	}
    439}
    440
    441static int hctx_tags_show(void *data, struct seq_file *m)
    442{
    443	struct blk_mq_hw_ctx *hctx = data;
    444	struct request_queue *q = hctx->queue;
    445	int res;
    446
    447	res = mutex_lock_interruptible(&q->sysfs_lock);
    448	if (res)
    449		goto out;
    450	if (hctx->tags)
    451		blk_mq_debugfs_tags_show(m, hctx->tags);
    452	mutex_unlock(&q->sysfs_lock);
    453
    454out:
    455	return res;
    456}
    457
    458static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
    459{
    460	struct blk_mq_hw_ctx *hctx = data;
    461	struct request_queue *q = hctx->queue;
    462	int res;
    463
    464	res = mutex_lock_interruptible(&q->sysfs_lock);
    465	if (res)
    466		goto out;
    467	if (hctx->tags)
    468		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
    469	mutex_unlock(&q->sysfs_lock);
    470
    471out:
    472	return res;
    473}
    474
    475static int hctx_sched_tags_show(void *data, struct seq_file *m)
    476{
    477	struct blk_mq_hw_ctx *hctx = data;
    478	struct request_queue *q = hctx->queue;
    479	int res;
    480
    481	res = mutex_lock_interruptible(&q->sysfs_lock);
    482	if (res)
    483		goto out;
    484	if (hctx->sched_tags)
    485		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
    486	mutex_unlock(&q->sysfs_lock);
    487
    488out:
    489	return res;
    490}
    491
    492static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
    493{
    494	struct blk_mq_hw_ctx *hctx = data;
    495	struct request_queue *q = hctx->queue;
    496	int res;
    497
    498	res = mutex_lock_interruptible(&q->sysfs_lock);
    499	if (res)
    500		goto out;
    501	if (hctx->sched_tags)
    502		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
    503	mutex_unlock(&q->sysfs_lock);
    504
    505out:
    506	return res;
    507}
    508
    509static int hctx_run_show(void *data, struct seq_file *m)
    510{
    511	struct blk_mq_hw_ctx *hctx = data;
    512
    513	seq_printf(m, "%lu\n", hctx->run);
    514	return 0;
    515}
    516
    517static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
    518			      loff_t *ppos)
    519{
    520	struct blk_mq_hw_ctx *hctx = data;
    521
    522	hctx->run = 0;
    523	return count;
    524}
    525
    526static int hctx_active_show(void *data, struct seq_file *m)
    527{
    528	struct blk_mq_hw_ctx *hctx = data;
    529
    530	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
    531	return 0;
    532}
    533
    534static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
    535{
    536	struct blk_mq_hw_ctx *hctx = data;
    537
    538	seq_printf(m, "%u\n", hctx->dispatch_busy);
    539	return 0;
    540}
    541
    542#define CTX_RQ_SEQ_OPS(name, type)					\
    543static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
    544	__acquires(&ctx->lock)						\
    545{									\
    546	struct blk_mq_ctx *ctx = m->private;				\
    547									\
    548	spin_lock(&ctx->lock);						\
    549	return seq_list_start(&ctx->rq_lists[type], *pos);		\
    550}									\
    551									\
    552static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
    553				     loff_t *pos)			\
    554{									\
    555	struct blk_mq_ctx *ctx = m->private;				\
    556									\
    557	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
    558}									\
    559									\
    560static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
    561	__releases(&ctx->lock)						\
    562{									\
    563	struct blk_mq_ctx *ctx = m->private;				\
    564									\
    565	spin_unlock(&ctx->lock);					\
    566}									\
    567									\
    568static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
    569	.start	= ctx_##name##_rq_list_start,				\
    570	.next	= ctx_##name##_rq_list_next,				\
    571	.stop	= ctx_##name##_rq_list_stop,				\
    572	.show	= blk_mq_debugfs_rq_show,				\
    573}
    574
    575CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
    576CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
    577CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
    578
    579static int blk_mq_debugfs_show(struct seq_file *m, void *v)
    580{
    581	const struct blk_mq_debugfs_attr *attr = m->private;
    582	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;
    583
    584	return attr->show(data, m);
    585}
    586
    587static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
    588				    size_t count, loff_t *ppos)
    589{
    590	struct seq_file *m = file->private_data;
    591	const struct blk_mq_debugfs_attr *attr = m->private;
    592	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
    593
    594	/*
    595	 * Attributes that only implement .seq_ops are read-only and 'attr' is
     596	 * the same as 'data' in this case.
    597	 */
    598	if (attr == data || !attr->write)
    599		return -EPERM;
    600
    601	return attr->write(data, buf, count, ppos);
    602}
    603
    604static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
    605{
    606	const struct blk_mq_debugfs_attr *attr = inode->i_private;
    607	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
    608	struct seq_file *m;
    609	int ret;
    610
    611	if (attr->seq_ops) {
    612		ret = seq_open(file, attr->seq_ops);
    613		if (!ret) {
    614			m = file->private_data;
    615			m->private = data;
    616		}
    617		return ret;
    618	}
    619
    620	if (WARN_ON_ONCE(!attr->show))
    621		return -EPERM;
    622
    623	return single_open(file, blk_mq_debugfs_show, inode->i_private);
    624}
    625
    626static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
    627{
    628	const struct blk_mq_debugfs_attr *attr = inode->i_private;
    629
    630	if (attr->show)
    631		return single_release(inode, file);
    632
    633	return seq_release(inode, file);
    634}
    635
    636static const struct file_operations blk_mq_debugfs_fops = {
    637	.open		= blk_mq_debugfs_open,
    638	.read		= seq_read,
    639	.write		= blk_mq_debugfs_write,
    640	.llseek		= seq_lseek,
    641	.release	= blk_mq_debugfs_release,
    642};
    643
    644static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
    645	{"state", 0400, hctx_state_show},
    646	{"flags", 0400, hctx_flags_show},
    647	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
    648	{"busy", 0400, hctx_busy_show},
    649	{"ctx_map", 0400, hctx_ctx_map_show},
    650	{"tags", 0400, hctx_tags_show},
    651	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
    652	{"sched_tags", 0400, hctx_sched_tags_show},
    653	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
    654	{"run", 0600, hctx_run_show, hctx_run_write},
    655	{"active", 0400, hctx_active_show},
    656	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
    657	{"type", 0400, hctx_type_show},
    658	{},
    659};
    660
    661static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
    662	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
    663	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
    664	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
    665	{},
    666};
    667
    668static void debugfs_create_files(struct dentry *parent, void *data,
    669				 const struct blk_mq_debugfs_attr *attr)
    670{
    671	if (IS_ERR_OR_NULL(parent))
    672		return;
    673
    674	d_inode(parent)->i_private = data;
    675
    676	for (; attr->name; attr++)
    677		debugfs_create_file(attr->name, attr->mode, parent,
    678				    (void *)attr, &blk_mq_debugfs_fops);
    679}
    680
    681void blk_mq_debugfs_register(struct request_queue *q)
    682{
    683	struct blk_mq_hw_ctx *hctx;
    684	unsigned long i;
    685
    686	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);
    687
    688	/*
    689	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
    690	 * didn't exist yet (because we don't know what to name the directory
    691	 * until the queue is registered to a gendisk).
    692	 */
    693	if (q->elevator && !q->sched_debugfs_dir)
    694		blk_mq_debugfs_register_sched(q);
    695
    696	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
    697	queue_for_each_hw_ctx(q, hctx, i) {
    698		if (!hctx->debugfs_dir)
    699			blk_mq_debugfs_register_hctx(q, hctx);
    700		if (q->elevator && !hctx->sched_debugfs_dir)
    701			blk_mq_debugfs_register_sched_hctx(q, hctx);
    702	}
    703
    704	if (q->rq_qos) {
    705		struct rq_qos *rqos = q->rq_qos;
    706
    707		while (rqos) {
    708			blk_mq_debugfs_register_rqos(rqos);
    709			rqos = rqos->next;
    710		}
    711	}
    712}
    713
    714static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
    715					struct blk_mq_ctx *ctx)
    716{
    717	struct dentry *ctx_dir;
    718	char name[20];
    719
    720	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
    721	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
    722
    723	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
    724}
    725
    726void blk_mq_debugfs_register_hctx(struct request_queue *q,
    727				  struct blk_mq_hw_ctx *hctx)
    728{
    729	struct blk_mq_ctx *ctx;
    730	char name[20];
    731	int i;
    732
    733	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
    734	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
    735
    736	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);
    737
    738	hctx_for_each_ctx(hctx, ctx, i)
    739		blk_mq_debugfs_register_ctx(hctx, ctx);
    740}
    741
    742void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
    743{
    744	if (!hctx->queue->debugfs_dir)
    745		return;
    746	debugfs_remove_recursive(hctx->debugfs_dir);
    747	hctx->sched_debugfs_dir = NULL;
    748	hctx->debugfs_dir = NULL;
    749}
    750
    751void blk_mq_debugfs_register_hctxs(struct request_queue *q)
    752{
    753	struct blk_mq_hw_ctx *hctx;
    754	unsigned long i;
    755
    756	queue_for_each_hw_ctx(q, hctx, i)
    757		blk_mq_debugfs_register_hctx(q, hctx);
    758}
    759
    760void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
    761{
    762	struct blk_mq_hw_ctx *hctx;
    763	unsigned long i;
    764
    765	queue_for_each_hw_ctx(q, hctx, i)
    766		blk_mq_debugfs_unregister_hctx(hctx);
    767}
    768
    769void blk_mq_debugfs_register_sched(struct request_queue *q)
    770{
    771	struct elevator_type *e = q->elevator->type;
    772
    773	lockdep_assert_held(&q->debugfs_mutex);
    774
    775	/*
     776	 * If the parent directory has not been created yet, return; we will be
    777	 * called again later on and the directory/files will be created then.
    778	 */
    779	if (!q->debugfs_dir)
    780		return;
    781
    782	if (!e->queue_debugfs_attrs)
    783		return;
    784
    785	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
    786
    787	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
    788}
    789
    790void blk_mq_debugfs_unregister_sched(struct request_queue *q)
    791{
    792	lockdep_assert_held(&q->debugfs_mutex);
    793
    794	debugfs_remove_recursive(q->sched_debugfs_dir);
    795	q->sched_debugfs_dir = NULL;
    796}
    797
    798static const char *rq_qos_id_to_name(enum rq_qos_id id)
    799{
    800	switch (id) {
    801	case RQ_QOS_WBT:
    802		return "wbt";
    803	case RQ_QOS_LATENCY:
    804		return "latency";
    805	case RQ_QOS_COST:
    806		return "cost";
    807	case RQ_QOS_IOPRIO:
    808		return "ioprio";
    809	}
    810	return "unknown";
    811}
    812
    813void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
    814{
    815	lockdep_assert_held(&rqos->q->debugfs_mutex);
    816
    817	if (!rqos->q->debugfs_dir)
    818		return;
    819	debugfs_remove_recursive(rqos->debugfs_dir);
    820	rqos->debugfs_dir = NULL;
    821}
    822
    823void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
    824{
    825	struct request_queue *q = rqos->q;
    826	const char *dir_name = rq_qos_id_to_name(rqos->id);
    827
    828	lockdep_assert_held(&q->debugfs_mutex);
    829
    830	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
    831		return;
    832
    833	if (!q->rqos_debugfs_dir)
    834		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
    835							 q->debugfs_dir);
    836
    837	rqos->debugfs_dir = debugfs_create_dir(dir_name,
    838					       rqos->q->rqos_debugfs_dir);
    839
    840	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
    841}
    842
    843void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
    844					struct blk_mq_hw_ctx *hctx)
    845{
    846	struct elevator_type *e = q->elevator->type;
    847
    848	lockdep_assert_held(&q->debugfs_mutex);
    849
    850	/*
    851	 * If the parent debugfs directory has not been created yet, return;
     852	 * we will be called again later on with the appropriate parent debugfs
     853	 * directory from blk_register_queue().
    854	 */
    855	if (!hctx->debugfs_dir)
    856		return;
    857
    858	if (!e->hctx_debugfs_attrs)
    859		return;
    860
    861	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
    862						     hctx->debugfs_dir);
    863	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
    864			     e->hctx_debugfs_attrs);
    865}
    866
    867void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
    868{
    869	lockdep_assert_held(&hctx->queue->debugfs_mutex);
    870
    871	if (!hctx->queue->debugfs_dir)
    872		return;
    873	debugfs_remove_recursive(hctx->sched_debugfs_dir);
    874	hctx->sched_debugfs_dir = NULL;
    875}
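
At runtime these files appear under debugfs, typically /sys/kernel/debug/block/<disk>/, with one hctx<n>/ directory per hardware context and one cpu<n>/ directory per software context, as created by blk_mq_debugfs_register_hctx() and blk_mq_debugfs_register_ctx() above. Most attributes are read-only; the queue-level "state" file is writable and, per queue_state_write() above, accepts exactly the strings "run", "start" and "kick". The snippet below is a minimal user-space sketch of kicking the requeue list; the debugfs mount point and the disk name vda are assumptions and will differ per system.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Path is an example; adjust the debugfs mount point and disk name. */
	const char *path = "/sys/kernel/debug/block/vda/state";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* queue_state_write() parses the buffer and calls blk_mq_kick_requeue_list(). */
	if (write(fd, "kick", strlen("kick")) != (ssize_t)strlen("kick"))
		perror("write");

	close(fd);
	return 0;
}
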