cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

debug.c (8409B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Debug controller
 *
 * WARNING: This controller is for cgroup core debugging only.
 * Its interfaces are unstable and subject to changes at any time.
 */
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "cgroup-internal.h"

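/* Allocate a bare css; the debug controller keeps no per-css state of its own. */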
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

/*
 * debug_taskcount_read - return the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 */
static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}

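/*
 * Show the current task's css_set: its address, refcount and the csses
 * it points to for each registered subsystem.
 */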
static int current_css_set_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct css_set *cset;
	struct cgroup_subsys *ss;
	struct cgroup_subsys_state *css;
	int i, refcnt;

	if (!cgroup_kn_lock_live(of->kn, false))
		return -ENODEV;

	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	cset = task_css_set(current);
	refcnt = refcount_read(&cset->refcount);
	seq_printf(seq, "css_set %pK %d", cset, refcnt);
	if (refcnt > cset->nr_tasks)
		seq_printf(seq, " +%d", refcnt - cset->nr_tasks);
	seq_puts(seq, "\n");

	/*
	 * Print the css'es stored in the current css_set.
	 */
	for_each_subsys(ss, i) {
		css = cset->subsys[ss->id];
		if (!css)
			continue;
		seq_printf(seq, "%2d: %-4s\t- %p[%d]\n", ss->id, ss->name,
			  css, css->id);
	}
	rcu_read_unlock();
	spin_unlock_irq(&css_set_lock);
	cgroup_kn_unlock(of->kn);
	return 0;
}

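/* Report the refcount of the current task's css_set. */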
static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = refcount_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}

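/*
 * For each mounted hierarchy, show the cgroup that the current task's
 * css_set is linked to.
 */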
static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	cset = task_css_set(current);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	rcu_read_unlock();
	spin_unlock_irq(&css_set_lock);
	kfree(name_buf);
	return 0;
}

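/*
 * Walk every css_set linked to this cgroup and show its member tasks,
 * plus summary counts of threaded, dead and extra-referenced css_sets.
 */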
#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;
	int dead_cnt = 0, extra_refs = 0, threaded_csets = 0;

	spin_lock_irq(&css_set_lock);

	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;
		int refcnt = refcount_read(&cset->refcount);

		/*
		 * Print out the proc_cset and threaded_cset relationship
		 * and highlight difference between refcount and task_count.
		 */
		seq_printf(seq, "css_set %pK", cset);
		if (rcu_dereference_protected(cset->dom_cset, 1) != cset) {
			threaded_csets++;
			seq_printf(seq, "=>%pK", cset->dom_cset);
		}
		if (!list_empty(&cset->threaded_csets)) {
			struct css_set *tcset;
			int idx = 0;

			list_for_each_entry(tcset, &cset->threaded_csets,
					    threaded_csets_node) {
				seq_puts(seq, idx ? "," : "<=");
				seq_printf(seq, "%pK", tcset);
				idx++;
			}
		} else {
			seq_printf(seq, " %d", refcnt);
			if (refcnt - cset->nr_tasks > 0) {
				int extra = refcnt - cset->nr_tasks;

				seq_printf(seq, " +%d", extra);
				/*
				 * Take out the one additional reference in
				 * init_css_set.
				 */
				if (cset == &init_css_set)
					extra--;
				extra_refs += extra;
			}
		}
		seq_puts(seq, "\n");

		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
				seq_printf(seq, "  task %d\n",
					   task_pid_vnr(task));
		}

		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
			if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
				seq_printf(seq, "  task %d\n",
					   task_pid_vnr(task));
		}
		/* show # of overflowed tasks */
		if (count > MAX_TASKS_SHOWN_PER_CSS)
			seq_printf(seq, "  ... (%d)\n",
				   count - MAX_TASKS_SHOWN_PER_CSS);

		if (cset->dead) {
			seq_puts(seq, "    [dead]\n");
			dead_cnt++;
		}

		WARN_ON(count != cset->nr_tasks);
	}
	spin_unlock_irq(&css_set_lock);

	if (!dead_cnt && !extra_refs && !threaded_csets)
		return 0;

	seq_puts(seq, "\n");
	if (threaded_csets)
		seq_printf(seq, "threaded css_sets = %d\n", threaded_csets);
	if (extra_refs)
		seq_printf(seq, "extra references = %d\n", extra_refs);
	if (dead_cnt)
		seq_printf(seq, "dead css_sets = %d\n", dead_cnt);

	return 0;
}

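/*
 * Show each subsystem's css attached to this cgroup together with its
 * parent css id and online count.
 */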
static int cgroup_subsys_states_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct cgroup *cgrp;
	struct cgroup_subsys *ss;
	struct cgroup_subsys_state *css;
	char pbuf[16];
	int i;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	for_each_subsys(ss, i) {
		css = rcu_dereference_check(cgrp->subsys[ss->id], true);
		if (!css)
			continue;

		pbuf[0] = '\0';

		/* Show the parent CSS if applicable */
		if (css->parent)
			snprintf(pbuf, sizeof(pbuf) - 1, " P=%d",
				 css->parent->id);
		seq_printf(seq, "%2d: %-4s\t- %p[%d] %d%s\n", ss->id, ss->name,
			  css, css->id,
			  atomic_read(&css->online_cnt), pbuf);
	}

	cgroup_kn_unlock(of->kn);
	return 0;
}

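/* Print the subsystems selected by @mask as a comma-separated list. */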
static void cgroup_masks_read_one(struct seq_file *seq, const char *name,
				  u16 mask)
{
	struct cgroup_subsys *ss;
	int ssid;
	bool first = true;

	seq_printf(seq, "%-17s: ", name);
	for_each_subsys(ss, ssid) {
		if (!(mask & (1 << ssid)))
			continue;
		if (!first)
			seq_puts(seq, ", ");
		seq_puts(seq, ss->name);
		first = false;
	}
	seq_putc(seq, '\n');
}

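/* Show this cgroup's subtree_control and subtree_ss_mask by subsystem name. */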
static int cgroup_masks_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct cgroup *cgrp;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	cgroup_masks_read_one(seq, "subtree_control", cgrp->subtree_control);
	cgroup_masks_read_one(seq, "subtree_ss_mask", cgrp->subtree_ss_mask);

	cgroup_kn_unlock(of->kn);
	return 0;
}

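/* A cgroup is releasable when it has no member tasks and no online children. */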
static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return (!cgroup_is_populated(css->cgroup) &&
		!css_has_online_children(&css->cgroup->self));
}

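/* Interface files exposed on the v1 (legacy) hierarchies. */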
static struct cftype debug_legacy_files[] =  {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.seq_show = current_css_set_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "cgroup_subsys_states",
		.seq_show = cgroup_subsys_states_read,
	},

	{
		.name = "cgroup_masks",
		.seq_show = cgroup_masks_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};

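/*
 * Interface files exposed on the default (v2) hierarchy; note the shorter
 * names and the absence of "releasable".
 */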
static struct cftype debug_files[] =  {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.seq_show = current_css_set_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "csses",
		.seq_show = cgroup_subsys_states_read,
	},

	{
		.name = "masks",
		.seq_show = cgroup_masks_read,
	},

	{ }	/* terminate */
};

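/* The debug controller itself; the v2 interface is hooked up by enable_debug_cgroup() below. */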
struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc	= debug_css_alloc,
	.css_free	= debug_css_free,
	.legacy_cftypes	= debug_legacy_files,
};

/*
 * On v2, debug is an implicit controller enabled by "cgroup_debug" boot
 * parameter.
 */
void __init enable_debug_cgroup(void)
{
	debug_cgrp_subsys.dfl_cftypes = debug_files;
	debug_cgrp_subsys.implicit_on_dfl = true;
	debug_cgrp_subsys.threaded = true;
}