cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

trace_boot.c (16166B)


// SPDX-License-Identifier: GPL-2.0
/*
 * trace_boot.c
 * Tracing kernel boot-time
 */
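
/*
 * Illustrative example (not part of the original file): this code consumes
 * "ftrace.*" keys from the kernel bootconfig. Assuming the event and option
 * names below exist on the booted kernel, a minimal bootconfig fragment
 * could look like:
 *
 *	ftrace.tracing_on = 1
 *	ftrace.trace_clock = global
 *	ftrace.buffer_size = 1MB
 *	ftrace.options = sym-addr
 *	ftrace.event.sched.sched_switch.enable
 *	ftrace.instance.foo.tracer = function
 *
 * See Documentation/trace/boottime-trace.rst for the full syntax.
 */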

#define pr_fmt(fmt)	"trace_boot: " fmt

#include <linux/bootconfig.h>
#include <linux/cpumask.h>
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/trace.h>
#include <linux/trace_events.h>

#include "trace.h"

#define MAX_BUF_LEN 256

static void __init
trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *anode;
	const char *p;
	char buf[MAX_BUF_LEN];
	unsigned long v = 0;

	/* Common ftrace options */
	xbc_node_for_each_array_value(node, "options", anode, p) {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
			pr_err("String is too long: %s\n", p);
			continue;
		}

		if (trace_set_options(tr, buf) < 0)
			pr_err("Failed to set option: %s\n", buf);
	}

	p = xbc_node_find_value(node, "tracing_on", NULL);
	if (p && *p != '\0') {
		if (kstrtoul(p, 10, &v))
			pr_err("Failed to set tracing on: %s\n", p);
		if (v)
			tracer_tracing_on(tr);
		else
			tracer_tracing_off(tr);
	}

	p = xbc_node_find_value(node, "trace_clock", NULL);
	if (p && *p != '\0') {
		if (tracing_set_clock(tr, p) < 0)
			pr_err("Failed to set trace clock: %s\n", p);
	}

	p = xbc_node_find_value(node, "buffer_size", NULL);
	if (p && *p != '\0') {
		v = memparse(p, NULL);
		if (v < PAGE_SIZE)
			pr_err("Buffer size is too small: %s\n", p);
		if (tracing_resize_ring_buffer(tr, v, RING_BUFFER_ALL_CPUS) < 0)
			pr_err("Failed to resize trace buffer to %s\n", p);
	}

	p = xbc_node_find_value(node, "cpumask", NULL);
	if (p && *p != '\0') {
		cpumask_var_t new_mask;

		if (alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
			if (cpumask_parse(p, new_mask) < 0 ||
			    tracing_set_cpumask(tr, new_mask) < 0)
				pr_err("Failed to set new CPU mask %s\n", p);
			free_cpumask_var(new_mask);
		}
	}
}

#ifdef CONFIG_EVENT_TRACING
static void __init
trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *p;

	xbc_node_for_each_array_value(node, "events", anode, p) {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
			pr_err("String is too long: %s\n", p);
			continue;
		}

		if (ftrace_set_clr_event(tr, buf, 1) < 0)
			pr_err("Failed to enable event: %s\n", p);
	}
}

#ifdef CONFIG_KPROBE_EVENTS
static int __init
trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
{
	struct dynevent_cmd cmd;
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *val;
	int ret = 0;

	xbc_node_for_each_array_value(node, "probes", anode, val) {
		kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);

		ret = kprobe_event_gen_cmd_start(&cmd, event, val);
		if (ret) {
			pr_err("Failed to generate probe: %s\n", buf);
			break;
		}

		ret = kprobe_event_gen_cmd_end(&cmd);
		if (ret) {
			pr_err("Failed to add probe: %s\n", buf);
			break;
		}
	}

	return ret;
}
#else
static inline int __init
trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
{
	pr_err("Kprobe event is not supported.\n");
	return -ENOTSUPP;
}
#endif
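
/*
 * Illustrative example (not part of the original file): a hypothetical
 * kprobe event defined from bootconfig; each "probes" entry above is turned
 * into one dynamic-event command for the named event:
 *
 *	ftrace.event.kprobes.myprobe.probes = "vfs_read $arg1 $arg2"
 */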

#ifdef CONFIG_SYNTH_EVENTS
static int __init
trace_boot_add_synth_event(struct xbc_node *node, const char *event)
{
	struct dynevent_cmd cmd;
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *p;
	int ret;

	synth_event_cmd_init(&cmd, buf, MAX_BUF_LEN);

	ret = synth_event_gen_cmd_start(&cmd, event, NULL);
	if (ret)
		return ret;

	xbc_node_for_each_array_value(node, "fields", anode, p) {
		ret = synth_event_add_field_str(&cmd, p);
		if (ret)
			return ret;
	}

	ret = synth_event_gen_cmd_end(&cmd);
	if (ret < 0)
		pr_err("Failed to add synthetic event: %s\n", buf);

	return ret;
}
#else
static inline int __init
trace_boot_add_synth_event(struct xbc_node *node, const char *event)
{
	pr_err("Synthetic event is not supported.\n");
	return -ENOTSUPP;
}
#endif
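
/*
 * Illustrative example (not part of the original file): a hypothetical
 * synthetic event declared from bootconfig; each "fields" entry becomes one
 * field of the generated event:
 *
 *	ftrace.event.synthetic.wakeup_lat.fields = "pid_t pid", "u64 delta_us"
 */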

#ifdef CONFIG_HIST_TRIGGERS
static int __init __printf(3, 4)
append_printf(char **bufp, char *end, const char *fmt, ...)
{
	va_list args;
	int ret;

	if (*bufp == end)
		return -ENOSPC;

	va_start(args, fmt);
	ret = vsnprintf(*bufp, end - *bufp, fmt, args);
	if (ret < end - *bufp) {
		*bufp += ret;
	} else {
		*bufp = end;
		ret = -ERANGE;
	}
	va_end(args);

	return ret;
}

static int __init
append_str_nospace(char **bufp, char *end, const char *str)
{
	char *p = *bufp;
	int len;

	while (p < end - 1 && *str != '\0') {
		if (!isspace(*str))
			*(p++) = *str;
		str++;
	}
	*p = '\0';
	if (p == end - 1) {
		*bufp = end;
		return -ENOSPC;
	}
	len = p - *bufp;
	*bufp = p;
	return (int)len;
}

static int __init
trace_boot_hist_add_array(struct xbc_node *hnode, char **bufp,
			  char *end, const char *key)
{
	struct xbc_node *anode;
	const char *p;
	char sep;

	p = xbc_node_find_value(hnode, key, &anode);
	if (p) {
		if (!anode) {
			pr_err("hist.%s requires value(s).\n", key);
			return -EINVAL;
		}

		append_printf(bufp, end, ":%s", key);
		sep = '=';
		xbc_array_for_each_value(anode, p) {
			append_printf(bufp, end, "%c%s", sep, p);
			if (sep == '=')
				sep = ',';
		}
	} else
		return -ENOENT;

	return 0;
}

static int __init
trace_boot_hist_add_one_handler(struct xbc_node *hnode, char **bufp,
				char *end, const char *handler,
				const char *param)
{
	struct xbc_node *knode, *anode;
	const char *p;
	char sep;

	/* Compose 'handler' parameter */
	p = xbc_node_find_value(hnode, param, NULL);
	if (!p) {
		pr_err("hist.%s requires '%s' option.\n",
		       xbc_node_get_data(hnode), param);
		return -EINVAL;
	}
	append_printf(bufp, end, ":%s(%s)", handler, p);

	/* Compose 'action' parameter */
	knode = xbc_node_find_subkey(hnode, "trace");
	if (!knode)
		knode = xbc_node_find_subkey(hnode, "save");

	if (knode) {
		anode = xbc_node_get_child(knode);
		if (!anode || !xbc_node_is_value(anode)) {
			pr_err("hist.%s.%s requires value(s).\n",
			       xbc_node_get_data(hnode),
			       xbc_node_get_data(knode));
			return -EINVAL;
		}

		append_printf(bufp, end, ".%s", xbc_node_get_data(knode));
		sep = '(';
		xbc_array_for_each_value(anode, p) {
			append_printf(bufp, end, "%c%s", sep, p);
			if (sep == '(')
				sep = ',';
		}
		append_printf(bufp, end, ")");
	} else if (xbc_node_find_subkey(hnode, "snapshot")) {
		append_printf(bufp, end, ".snapshot()");
	} else {
		pr_err("hist.%s requires an action.\n",
		       xbc_node_get_data(hnode));
		return -EINVAL;
	}

	return 0;
}

static int __init
trace_boot_hist_add_handlers(struct xbc_node *hnode, char **bufp,
			     char *end, const char *param)
{
	struct xbc_node *node;
	const char *p, *handler;
	int ret = 0;

	handler = xbc_node_get_data(hnode);

	xbc_node_for_each_subkey(hnode, node) {
		p = xbc_node_get_data(node);
		if (!isdigit(p[0]))
			continue;
		/* All digit-started nodes should be instances. */
		ret = trace_boot_hist_add_one_handler(node, bufp, end, handler, param);
		if (ret < 0)
			break;
	}

	if (xbc_node_find_subkey(hnode, param))
		ret = trace_boot_hist_add_one_handler(hnode, bufp, end, handler, param);

	return ret;
}

/*
 * Histogram boottime tracing syntax.
 *
 * ftrace.[instance.INSTANCE.]event.GROUP.EVENT.hist[.N] {
 *	keys = <KEY>[,...]
 *	values = <VAL>[,...]
 *	sort = <SORT-KEY>[,...]
 *	size = <ENTRIES>
 *	name = <HISTNAME>
 *	var { <VAR> = <EXPR> ... }
 *	pause|continue|clear
 *	onmax|onchange[.N] { var = <VAR>; <ACTION> [= <PARAM>] }
 *	onmatch[.N] { event = <EVENT>; <ACTION> [= <PARAM>] }
 *	filter = <FILTER>
 * }
 *
 * Where <ACTION> is one of:
 *
 *	trace = <EVENT>, <ARG1>[, ...]
 *	save = <ARG1>[, ...]
 *	snapshot
 */
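/*
 * Illustrative example (not part of the original file): a hypothetical
 * histogram written in the syntax above, which trace_boot_compose_hist_cmd()
 * below flattens into a single "hist:keys=...:..." trigger string:
 *
 *	ftrace.event.sched.sched_waking.hist {
 *		keys = pid
 *		values = prio
 *		sort = pid
 *		size = 2048
 *	}
 */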
static int __init
trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size)
{
	struct xbc_node *node, *knode;
	char *end = buf + size;
	const char *p;
	int ret = 0;

	append_printf(&buf, end, "hist");

	ret = trace_boot_hist_add_array(hnode, &buf, end, "keys");
	if (ret < 0) {
		if (ret == -ENOENT)
			pr_err("hist requires keys.\n");
		return -EINVAL;
	}

	ret = trace_boot_hist_add_array(hnode, &buf, end, "values");
	if (ret == -EINVAL)
		return ret;
	ret = trace_boot_hist_add_array(hnode, &buf, end, "sort");
	if (ret == -EINVAL)
		return ret;

	p = xbc_node_find_value(hnode, "size", NULL);
	if (p)
		append_printf(&buf, end, ":size=%s", p);

	p = xbc_node_find_value(hnode, "name", NULL);
	if (p)
		append_printf(&buf, end, ":name=%s", p);

	node = xbc_node_find_subkey(hnode, "var");
	if (node) {
		xbc_node_for_each_key_value(node, knode, p) {
			/* Expression must not include spaces. */
			append_printf(&buf, end, ":%s=",
				      xbc_node_get_data(knode));
			append_str_nospace(&buf, end, p);
		}
	}

	/* Histogram control attributes (mutually exclusive) */
	if (xbc_node_find_value(hnode, "pause", NULL))
		append_printf(&buf, end, ":pause");
	else if (xbc_node_find_value(hnode, "continue", NULL))
		append_printf(&buf, end, ":continue");
	else if (xbc_node_find_value(hnode, "clear", NULL))
		append_printf(&buf, end, ":clear");

	/* Histogram handler and actions */
	node = xbc_node_find_subkey(hnode, "onmax");
	if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0)
		return -EINVAL;
	node = xbc_node_find_subkey(hnode, "onchange");
	if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0)
		return -EINVAL;
	node = xbc_node_find_subkey(hnode, "onmatch");
	if (node && trace_boot_hist_add_handlers(node, &buf, end, "event") < 0)
		return -EINVAL;

	p = xbc_node_find_value(hnode, "filter", NULL);
	if (p)
		append_printf(&buf, end, " if %s", p);

	if (buf == end) {
		pr_err("hist exceeds the max command length.\n");
		return -E2BIG;
	}

	return 0;
}

static void __init
trace_boot_init_histograms(struct trace_event_file *file,
			   struct xbc_node *hnode, char *buf, size_t size)
{
	struct xbc_node *node;
	const char *p;
	char *tmp;

	xbc_node_for_each_subkey(hnode, node) {
		p = xbc_node_get_data(node);
		if (!isdigit(p[0]))
			continue;
		/* All digit-started nodes should be instances. */
		if (trace_boot_compose_hist_cmd(node, buf, size) == 0) {
			tmp = kstrdup(buf, GFP_KERNEL);
			if (!tmp)
				return;
			if (trigger_process_regex(file, buf) < 0)
				pr_err("Failed to apply hist trigger: %s\n", tmp);
			kfree(tmp);
		}
	}

	if (xbc_node_find_subkey(hnode, "keys")) {
		if (trace_boot_compose_hist_cmd(hnode, buf, size) == 0) {
			tmp = kstrdup(buf, GFP_KERNEL);
			if (!tmp)
				return;
			if (trigger_process_regex(file, buf) < 0)
				pr_err("Failed to apply hist trigger: %s\n", tmp);
			kfree(tmp);
		}
	}
}
#else
static void __init
trace_boot_init_histograms(struct trace_event_file *file,
			   struct xbc_node *hnode, char *buf, size_t size)
{
	/* do nothing */
}
#endif

static void __init
trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
			  struct xbc_node *enode)
{
	struct trace_event_file *file;
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *p, *group, *event;

	group = xbc_node_get_data(gnode);
	event = xbc_node_get_data(enode);

	if (!strcmp(group, "kprobes"))
		if (trace_boot_add_kprobe_event(enode, event) < 0)
			return;
	if (!strcmp(group, "synthetic"))
		if (trace_boot_add_synth_event(enode, event) < 0)
			return;

	mutex_lock(&event_mutex);
	file = find_event_file(tr, group, event);
	if (!file) {
		pr_err("Failed to find event: %s:%s\n", group, event);
		goto out;
	}

	p = xbc_node_find_value(enode, "filter", NULL);
	if (p && *p != '\0') {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
			pr_err("filter string is too long: %s\n", p);
		else if (apply_event_filter(file, buf) < 0)
			pr_err("Failed to apply filter: %s\n", buf);
	}

	if (IS_ENABLED(CONFIG_HIST_TRIGGERS)) {
		xbc_node_for_each_array_value(enode, "actions", anode, p) {
			if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
				pr_err("action string is too long: %s\n", p);
			else if (trigger_process_regex(file, buf) < 0)
				pr_err("Failed to apply an action: %s\n", p);
		}
		anode = xbc_node_find_subkey(enode, "hist");
		if (anode)
			trace_boot_init_histograms(file, anode, buf, ARRAY_SIZE(buf));
	} else if (xbc_node_find_value(enode, "actions", NULL))
		pr_err("Failed to apply event actions because CONFIG_HIST_TRIGGERS is not set.\n");

	if (xbc_node_find_value(enode, "enable", NULL)) {
		if (trace_event_enable_disable(file, 1, 0) < 0)
			pr_err("Failed to enable event node: %s:%s\n",
				group, event);
	}
out:
	mutex_unlock(&event_mutex);
}

static void __init
trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *gnode, *enode;
	bool enable, enable_all = false;
	const char *data;

	node = xbc_node_find_subkey(node, "event");
	if (!node)
		return;
	/* per-event key starts with "event.GROUP.EVENT" */
	xbc_node_for_each_subkey(node, gnode) {
		data = xbc_node_get_data(gnode);
		if (!strcmp(data, "enable")) {
			enable_all = true;
			continue;
		}
		enable = false;
		xbc_node_for_each_subkey(gnode, enode) {
			data = xbc_node_get_data(enode);
			if (!strcmp(data, "enable")) {
				enable = true;
				continue;
			}
			trace_boot_init_one_event(tr, gnode, enode);
		}
		/* Event enablement must be done after event settings */
		if (enable) {
			data = xbc_node_get_data(gnode);
			trace_array_set_clr_event(tr, data, NULL, true);
		}
	}
	/* Ditto */
	if (enable_all)
		trace_array_set_clr_event(tr, NULL, NULL, true);
}
#else
#define trace_boot_enable_events(tr, node) do {} while (0)
#define trace_boot_init_events(tr, node) do {} while (0)
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
static void __init
trace_boot_set_ftrace_filter(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *anode;
	const char *p;
	char *q;

	xbc_node_for_each_array_value(node, "ftrace.filters", anode, p) {
		q = kstrdup(p, GFP_KERNEL);
		if (!q)
			return;
		if (ftrace_set_filter(tr->ops, q, strlen(q), 0) < 0)
			pr_err("Failed to add %s to ftrace filter\n", p);
		else
			ftrace_filter_param = true;
		kfree(q);
	}
	xbc_node_for_each_array_value(node, "ftrace.notraces", anode, p) {
		q = kstrdup(p, GFP_KERNEL);
		if (!q)
			return;
		if (ftrace_set_notrace(tr->ops, q, strlen(q), 0) < 0)
			pr_err("Failed to add %s to ftrace filter\n", p);
		else
			ftrace_filter_param = true;
		kfree(q);
	}
}
#else
#define trace_boot_set_ftrace_filter(tr, node) do {} while (0)
#endif
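
/*
 * Illustrative example (not part of the original file): function filter and
 * notrace patterns can also come from bootconfig, e.g.
 *
 *	ftrace.ftrace.filters = "vfs_*", "sched_*"
 *	ftrace.ftrace.notraces = "printk"
 */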

static void __init
trace_boot_enable_tracer(struct trace_array *tr, struct xbc_node *node)
{
	const char *p;

	trace_boot_set_ftrace_filter(tr, node);

	p = xbc_node_find_value(node, "tracer", NULL);
	if (p && *p != '\0') {
		if (tracing_set_tracer(tr, p) < 0)
			pr_err("Failed to set given tracer: %s\n", p);
	}

	/* Since the tracer can free the snapshot buffer, allocate it here. */
	if (xbc_node_find_value(node, "alloc_snapshot", NULL)) {
		if (tracing_alloc_snapshot_instance(tr) < 0)
			pr_err("Failed to allocate snapshot buffer\n");
	}
}

static void __init
trace_boot_init_one_instance(struct trace_array *tr, struct xbc_node *node)
{
	trace_boot_set_instance_options(tr, node);
	trace_boot_init_events(tr, node);
	trace_boot_enable_events(tr, node);
	trace_boot_enable_tracer(tr, node);
}

static void __init
trace_boot_init_instances(struct xbc_node *node)
{
	struct xbc_node *inode;
	struct trace_array *tr;
	const char *p;

	node = xbc_node_find_subkey(node, "instance");
	if (!node)
		return;

	xbc_node_for_each_subkey(node, inode) {
		p = xbc_node_get_data(inode);
		if (!p || *p == '\0')
			continue;

		tr = trace_array_get_by_name(p);
		if (!tr) {
			pr_err("Failed to get trace instance %s\n", p);
			continue;
		}
		trace_boot_init_one_instance(tr, inode);
		trace_array_put(tr);
	}
}

static int __init trace_boot_init(void)
{
	struct xbc_node *trace_node;
	struct trace_array *tr;

	trace_node = xbc_find_node("ftrace");
	if (!trace_node)
		return 0;

	tr = top_trace_array();
	if (!tr)
		return 0;

	/* Global trace array is also one instance */
	trace_boot_init_one_instance(tr, trace_node);
	trace_boot_init_instances(trace_node);

	disable_tracing_selftest("running boot-time tracing");

	return 0;
}
/*
 * Start tracing at the end of core-initcall, so that it starts tracing
 * from the beginning of postcore_initcall.
 */
core_initcall_sync(trace_boot_init);