cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

trace_stat.c (7765B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for statistic tracing (histogram output).
 *
 * Copyright (C) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Based on the code from trace_branch.c which is
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/security.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/tracefs.h>
#include "trace_stat.h"
#include "trace.h"


/*
 * List of stat red-black nodes from a tracer
 * We use such a tree to quickly sort the stat
 * entries from the tracer.
 */
struct stat_node {
	struct rb_node		node;
	void			*stat;
};

/* A stat session is the stats output in one file */
struct stat_session {
	struct list_head	session_list;
	struct tracer_stat	*ts;
	struct rb_root		stat_root;
	struct mutex		stat_mutex;
	struct dentry		*file;
};

/* All of the sessions currently in use. Each stat file embeds one session */
static LIST_HEAD(all_stat_sessions);
static DEFINE_MUTEX(all_stat_sessions_mutex);

/* The root directory for all stat files */
static struct dentry		*stat_dir;

static void __reset_stat_session(struct stat_session *session)
{
	struct stat_node *snode, *n;

	rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) {
		if (session->ts->stat_release)
			session->ts->stat_release(snode->stat);
		kfree(snode);
	}

	session->stat_root = RB_ROOT;
}

static void reset_stat_session(struct stat_session *session)
{
	mutex_lock(&session->stat_mutex);
	__reset_stat_session(session);
	mutex_unlock(&session->stat_mutex);
}

static void destroy_session(struct stat_session *session)
{
	tracefs_remove(session->file);
	__reset_stat_session(session);
	mutex_destroy(&session->stat_mutex);
	kfree(session);
}

static int insert_stat(struct rb_root *root, void *stat, cmp_func_t cmp)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct stat_node *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->stat = stat;

	/*
	 * Figure out where to put the new node.
	 * This is a descending sort.
	 */
	while (*new) {
		struct stat_node *this;
		int result;

		this = container_of(*new, struct stat_node, node);
		result = cmp(data->stat, this->stat);

		parent = *new;
		if (result >= 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
	return 0;
}

/*
 * For tracers that don't provide a stat_cmp callback.
 * This one forces an insertion as the right-most node
 * in the rbtree.
 */
static int dummy_cmp(const void *p1, const void *p2)
{
	return -1;
}

/*
 * Initialize the stat rbtree at each trace_stat file opening.
 * All of this copying and sorting is required on every open,
 * since the stats could have changed between two file sessions.
 */
static int stat_seq_init(struct stat_session *session)
{
	struct tracer_stat *ts = session->ts;
	struct rb_root *root = &session->stat_root;
	void *stat;
	int ret = 0;
	int i;

	mutex_lock(&session->stat_mutex);
	__reset_stat_session(session);

	if (!ts->stat_cmp)
		ts->stat_cmp = dummy_cmp;

	stat = ts->stat_start(ts);
	if (!stat)
		goto exit;

	ret = insert_stat(root, stat, ts->stat_cmp);
	if (ret)
		goto exit;

	/*
	 * Iterate over the tracer stat entries and store them in an rbtree.
	 */
	for (i = 1; ; i++) {
		stat = ts->stat_next(stat, i);

		/* End of insertion */
		if (!stat)
			break;

		ret = insert_stat(root, stat, ts->stat_cmp);
		if (ret)
			goto exit_free_rbtree;
	}

exit:
	mutex_unlock(&session->stat_mutex);
	return ret;

exit_free_rbtree:
	__reset_stat_session(session);
	mutex_unlock(&session->stat_mutex);
	return ret;
}


static void *stat_seq_start(struct seq_file *s, loff_t *pos)
{
	struct stat_session *session = s->private;
	struct rb_node *node;
	int n = *pos;
	int i;

	/* Prevent tracer switches and rbtree modification */
	mutex_lock(&session->stat_mutex);

	/* If we are at the beginning of the file, print the headers */
	if (session->ts->stat_headers) {
		if (n == 0)
			return SEQ_START_TOKEN;
		n--;
	}

	node = rb_first(&session->stat_root);
	for (i = 0; node && i < n; i++)
		node = rb_next(node);

	return node;
}

static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
{
	struct stat_session *session = s->private;
	struct rb_node *node = p;

	(*pos)++;

	if (p == SEQ_START_TOKEN)
		return rb_first(&session->stat_root);

	return rb_next(node);
}

static void stat_seq_stop(struct seq_file *s, void *p)
{
	struct stat_session *session = s->private;
	mutex_unlock(&session->stat_mutex);
}

static int stat_seq_show(struct seq_file *s, void *v)
{
	struct stat_session *session = s->private;
	struct stat_node *l = container_of(v, struct stat_node, node);

	if (v == SEQ_START_TOKEN)
		return session->ts->stat_headers(s);

	return session->ts->stat_show(s, l->stat);
}

static const struct seq_operations trace_stat_seq_ops = {
	.start		= stat_seq_start,
	.next		= stat_seq_next,
	.stop		= stat_seq_stop,
	.show		= stat_seq_show
};

/* The session's stats are refilled and re-sorted at each stat file open */
static int tracing_stat_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct stat_session *session = inode->i_private;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	ret = stat_seq_init(session);
	if (ret)
		return ret;

	ret = seq_open(file, &trace_stat_seq_ops);
	if (ret) {
		reset_stat_session(session);
		return ret;
	}

	m = file->private_data;
	m->private = session;
	return ret;
}

/*
 * Avoid consuming memory with our now useless rbtree.
 */
static int tracing_stat_release(struct inode *i, struct file *f)
{
	struct stat_session *session = i->i_private;

	reset_stat_session(session);

	return seq_release(i, f);
}

static const struct file_operations tracing_stat_fops = {
	.open		= tracing_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_stat_release
};

static int tracing_stat_init(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return -ENODEV;

	stat_dir = tracefs_create_dir("trace_stat", NULL);
	if (!stat_dir) {
		pr_warn("Could not create tracefs 'trace_stat' entry\n");
		return -ENOMEM;
	}
	return 0;
}

static int init_stat_file(struct stat_session *session)
{
	int ret;

	if (!stat_dir && (ret = tracing_stat_init()))
		return ret;

	session->file = tracefs_create_file(session->ts->name, TRACE_MODE_WRITE,
					    stat_dir, session,
					    &tracing_stat_fops);
	if (!session->file)
		return -ENOMEM;
	return 0;
}

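/*
 * Register a stat-producing tracer: allocate its session, create its
 * trace_stat/<name> file and add it to the list of active sessions.
 * Returns 0 on success or a negative errno on failure.
 */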
int register_stat_tracer(struct tracer_stat *trace)
{
	struct stat_session *session, *node;
	int ret = -EINVAL;

	if (!trace)
		return -EINVAL;

	if (!trace->stat_start || !trace->stat_next || !trace->stat_show)
		return -EINVAL;

	/* Already registered? */
	mutex_lock(&all_stat_sessions_mutex);
	list_for_each_entry(node, &all_stat_sessions, session_list) {
		if (node->ts == trace)
			goto out;
	}

	ret = -ENOMEM;
	/* Init the session */
	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		goto out;

	session->ts = trace;
	INIT_LIST_HEAD(&session->session_list);
	mutex_init(&session->stat_mutex);

	ret = init_stat_file(session);
	if (ret) {
		destroy_session(session);
		goto out;
	}

	ret = 0;
	/* Register */
	list_add_tail(&session->session_list, &all_stat_sessions);
 out:
	mutex_unlock(&all_stat_sessions_mutex);

	return ret;
}

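/*
 * Unregister a previously registered stat tracer: drop its session from
 * the list, remove its trace_stat/<name> file and free the session.
 */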
void unregister_stat_tracer(struct tracer_stat *trace)
{
	struct stat_session *node, *tmp;

	mutex_lock(&all_stat_sessions_mutex);
	list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
		if (node->ts == trace) {
			list_del(&node->session_list);
			destroy_session(node);
			break;
		}
	}
	mutex_unlock(&all_stat_sessions_mutex);
}
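
For reference (not part of trace_stat.c): a minimal sketch of how a tracer might hook into this interface. The struct tracer_stat field names mirror the callbacks dereferenced above (stat_start, stat_next, stat_cmp, stat_headers, stat_show); every example_* identifier and the data itself are invented for illustration.

/*
 * Illustrative sketch only: a tracer exposing a small, fixed array of
 * counters as trace_stat/example_stats. The example_* names are
 * hypothetical; the callback signatures follow the uses of
 * struct tracer_stat seen in this file.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include "trace_stat.h"

struct example_entry {
	const char	*name;
	unsigned long	hits;
};

static struct example_entry example_entries[] = {
	{ "foo", 0 },
	{ "bar", 0 },
};

/* stat_start(): hand back the first entry, or NULL if there is nothing */
static void *example_stat_start(struct tracer_stat *trace)
{
	return &example_entries[0];
}

/* stat_next(): hand back the idx-th entry, NULL ends the iteration */
static void *example_stat_next(void *prev, int idx)
{
	if (idx >= ARRAY_SIZE(example_entries))
		return NULL;
	return &example_entries[idx];
}

/* stat_cmp(): with insert_stat() above, a positive result sorts first */
static int example_stat_cmp(const void *p1, const void *p2)
{
	const struct example_entry *a = p1, *b = p2;

	if (a->hits == b->hits)
		return 0;
	return a->hits > b->hits ? 1 : -1;
}

static int example_stat_headers(struct seq_file *s)
{
	seq_puts(s, "  name   hits\n");
	return 0;
}

static int example_stat_show(struct seq_file *s, void *p)
{
	struct example_entry *entry = p;

	seq_printf(s, "  %-6s %lu\n", entry->name, entry->hits);
	return 0;
}

static struct tracer_stat example_stats = {
	.name		= "example_stats",
	.stat_start	= example_stat_start,
	.stat_next	= example_stat_next,
	.stat_cmp	= example_stat_cmp,
	.stat_headers	= example_stat_headers,
	.stat_show	= example_stat_show,
};

/*
 * Typically called from a tracer's init path; pairs with
 * unregister_stat_tracer(&example_stats) on teardown.
 */
static int __init example_stats_init(void)
{
	return register_stat_tracer(&example_stats);
}

The descending order follows from insert_stat(): a comparison result >= 0 sends the new node to the left of the tree, so entries the comparator rates higher appear first when the file is read.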