cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ftrace.c (198651B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Infrastructure for profiling code inserted by 'gcc -pg'.
      4 *
      5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
      6 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
      7 *
      8 * Originally ported from the -rt patch by:
      9 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
     10 *
     11 * Based on code in the latency_tracer, that is:
     12 *
     13 *  Copyright (C) 2004-2006 Ingo Molnar
     14 *  Copyright (C) 2004 Nadia Yvette Chambers
     15 */
     16
     17#include <linux/stop_machine.h>
     18#include <linux/clocksource.h>
     19#include <linux/sched/task.h>
     20#include <linux/kallsyms.h>
     21#include <linux/security.h>
     22#include <linux/seq_file.h>
     23#include <linux/tracefs.h>
     24#include <linux/hardirq.h>
     25#include <linux/kthread.h>
     26#include <linux/uaccess.h>
     27#include <linux/bsearch.h>
     28#include <linux/module.h>
     29#include <linux/ftrace.h>
     30#include <linux/sysctl.h>
     31#include <linux/slab.h>
     32#include <linux/ctype.h>
     33#include <linux/sort.h>
     34#include <linux/list.h>
     35#include <linux/hash.h>
     36#include <linux/rcupdate.h>
     37#include <linux/kprobes.h>
     38
     39#include <trace/events/sched.h>
     40
     41#include <asm/sections.h>
     42#include <asm/setup.h>
     43
     44#include "ftrace_internal.h"
     45#include "trace_output.h"
     46#include "trace_stat.h"
     47
     48#define FTRACE_INVALID_FUNCTION		"__ftrace_invalid_address__"
     49
     50#define FTRACE_WARN_ON(cond)			\
     51	({					\
     52		int ___r = cond;		\
     53		if (WARN_ON(___r))		\
     54			ftrace_kill();		\
     55		___r;				\
     56	})
     57
     58#define FTRACE_WARN_ON_ONCE(cond)		\
     59	({					\
     60		int ___r = cond;		\
     61		if (WARN_ON_ONCE(___r))		\
     62			ftrace_kill();		\
     63		___r;				\
     64	})
     65
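/*
 * [Editor's note] Both macros above are GCC statement expressions: they
 * evaluate to the tested condition itself, so a caller can trip the
 * warning (and kill ftrace) while still branching on the result, e.g.:
 *
 *	if (FTRACE_WARN_ON(!rec))	// hypothetical caller
 *		return -EINVAL;
 */
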
     66/* hash bits for specific function selection */
     67#define FTRACE_HASH_DEFAULT_BITS 10
     68#define FTRACE_HASH_MAX_BITS 12
     69
     70#ifdef CONFIG_DYNAMIC_FTRACE
     71#define INIT_OPS_HASH(opsname)	\
     72	.func_hash		= &opsname.local_hash,			\
     73	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
     74#else
     75#define INIT_OPS_HASH(opsname)
     76#endif
     77
     78enum {
     79	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
     80	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
     81};
     82
     83struct ftrace_ops ftrace_list_end __read_mostly = {
     84	.func		= ftrace_stub,
     85	.flags		= FTRACE_OPS_FL_STUB,
     86	INIT_OPS_HASH(ftrace_list_end)
     87};
     88
     89/* ftrace_enabled is a method to turn ftrace on or off */
     90int ftrace_enabled __read_mostly;
     91static int __maybe_unused last_ftrace_enabled;
     92
     93/* Current function tracing op */
     94struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
     95/* What to set function_trace_op to */
     96static struct ftrace_ops *set_function_trace_op;
     97
     98static bool ftrace_pids_enabled(struct ftrace_ops *ops)
     99{
    100	struct trace_array *tr;
    101
    102	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
    103		return false;
    104
    105	tr = ops->private;
    106
    107	return tr->function_pids != NULL || tr->function_no_pids != NULL;
    108}
    109
    110static void ftrace_update_trampoline(struct ftrace_ops *ops);
    111
    112/*
    113 * ftrace_disabled is set when an anomaly is discovered.
    114 * ftrace_disabled is much stronger than ftrace_enabled.
    115 */
    116static int ftrace_disabled __read_mostly;
    117
    118DEFINE_MUTEX(ftrace_lock);
    119
    120struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
    121ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
    122struct ftrace_ops global_ops;
    123
     124/* Defined by vmlinux.lds.h; see the comment above arch_ftrace_ops_list_func for details */
    125void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
    126			  struct ftrace_ops *op, struct ftrace_regs *fregs);
    127
    128static inline void ftrace_ops_init(struct ftrace_ops *ops)
    129{
    130#ifdef CONFIG_DYNAMIC_FTRACE
    131	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
    132		mutex_init(&ops->local_hash.regex_lock);
    133		ops->func_hash = &ops->local_hash;
    134		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
    135	}
    136#endif
    137}
    138
    139static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
    140			    struct ftrace_ops *op, struct ftrace_regs *fregs)
    141{
    142	struct trace_array *tr = op->private;
    143	int pid;
    144
    145	if (tr) {
    146		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
    147		if (pid == FTRACE_PID_IGNORE)
    148			return;
    149		if (pid != FTRACE_PID_TRACE &&
    150		    pid != current->pid)
    151			return;
    152	}
    153
    154	op->saved_func(ip, parent_ip, op, fregs);
    155}
    156
    157static void ftrace_sync_ipi(void *data)
    158{
    159	/* Probably not needed, but do it anyway */
    160	smp_rmb();
    161}
    162
    163static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
    164{
    165	/*
    166	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
    167	 * then it needs to call the list anyway.
    168	 */
    169	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
    170	    FTRACE_FORCE_LIST_FUNC)
    171		return ftrace_ops_list_func;
    172
    173	return ftrace_ops_get_func(ops);
    174}
    175
    176static void update_ftrace_function(void)
    177{
    178	ftrace_func_t func;
    179
    180	/*
    181	 * Prepare the ftrace_ops that the arch callback will use.
    182	 * If there's only one ftrace_ops registered, the ftrace_ops_list
    183	 * will point to the ops we want.
    184	 */
    185	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
    186						lockdep_is_held(&ftrace_lock));
    187
    188	/* If there's no ftrace_ops registered, just call the stub function */
    189	if (set_function_trace_op == &ftrace_list_end) {
    190		func = ftrace_stub;
    191
    192	/*
    193	 * If we are at the end of the list and this ops is
    194	 * recursion safe and not dynamic and the arch supports passing ops,
    195	 * then have the mcount trampoline call the function directly.
    196	 */
    197	} else if (rcu_dereference_protected(ftrace_ops_list->next,
    198			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
    199		func = ftrace_ops_get_list_func(ftrace_ops_list);
    200
    201	} else {
    202		/* Just use the default ftrace_ops */
    203		set_function_trace_op = &ftrace_list_end;
    204		func = ftrace_ops_list_func;
    205	}
    206
    207	update_function_graph_func();
    208
    209	/* If there's no change, then do nothing more here */
    210	if (ftrace_trace_function == func)
    211		return;
    212
    213	/*
    214	 * If we are using the list function, it doesn't care
    215	 * about the function_trace_ops.
    216	 */
    217	if (func == ftrace_ops_list_func) {
    218		ftrace_trace_function = func;
    219		/*
    220		 * Don't even bother setting function_trace_ops,
    221		 * it would be racy to do so anyway.
    222		 */
    223		return;
    224	}
    225
    226#ifndef CONFIG_DYNAMIC_FTRACE
    227	/*
    228	 * For static tracing, we need to be a bit more careful.
     229	 * The function change takes effect immediately. Thus,
    230	 * we need to coordinate the setting of the function_trace_ops
    231	 * with the setting of the ftrace_trace_function.
    232	 *
    233	 * Set the function to the list ops, which will call the
    234	 * function we want, albeit indirectly, but it handles the
    235	 * ftrace_ops and doesn't depend on function_trace_op.
    236	 */
    237	ftrace_trace_function = ftrace_ops_list_func;
    238	/*
    239	 * Make sure all CPUs see this. Yes this is slow, but static
    240	 * tracing is slow and nasty to have enabled.
    241	 */
    242	synchronize_rcu_tasks_rude();
    243	/* Now all cpus are using the list ops. */
    244	function_trace_op = set_function_trace_op;
    245	/* Make sure the function_trace_op is visible on all CPUs */
    246	smp_wmb();
    247	/* Nasty way to force a rmb on all cpus */
    248	smp_call_function(ftrace_sync_ipi, NULL, 1);
    249	/* OK, we are all set to update the ftrace_trace_function now! */
    250#endif /* !CONFIG_DYNAMIC_FTRACE */
    251
    252	ftrace_trace_function = func;
    253}
    254
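/*
 * [Editor's note] A summary of the !CONFIG_DYNAMIC_FTRACE handoff above:
 *   1. Point ftrace_trace_function at the list func, which is correct
 *      with either the old or the new function_trace_op.
 *   2. synchronize_rcu_tasks_rude() waits until no CPU can still be
 *      mid-call through the old value; after it, every CPU uses the
 *      list func.
 *   3. Swap function_trace_op; smp_wmb() plus the IPI-forced smp_rmb()
 *      in ftrace_sync_ipi() make the new op visible everywhere.
 *   4. Only then is ftrace_trace_function set to the final callback.
 */
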
    255static void add_ftrace_ops(struct ftrace_ops __rcu **list,
    256			   struct ftrace_ops *ops)
    257{
    258	rcu_assign_pointer(ops->next, *list);
    259
    260	/*
    261	 * We are entering ops into the list but another
    262	 * CPU might be walking that list. We need to make sure
    263	 * the ops->next pointer is valid before another CPU sees
     264	 * the ops pointer inserted into the list.
    265	 */
    266	rcu_assign_pointer(*list, ops);
    267}
    268
    269static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
    270			     struct ftrace_ops *ops)
    271{
    272	struct ftrace_ops **p;
    273
    274	/*
    275	 * If we are removing the last function, then simply point
    276	 * to the ftrace_stub.
    277	 */
    278	if (rcu_dereference_protected(*list,
    279			lockdep_is_held(&ftrace_lock)) == ops &&
    280	    rcu_dereference_protected(ops->next,
    281			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
    282		*list = &ftrace_list_end;
    283		return 0;
    284	}
    285
    286	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
    287		if (*p == ops)
    288			break;
    289
    290	if (*p != ops)
    291		return -1;
    292
    293	*p = (*p)->next;
    294	return 0;
    295}
    296
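/*
 * [Editor's note] Readers traverse this list locklessly via the ops->next
 * pointers published above. A minimal sketch of the read side
 * (hypothetical helper, for illustration only):
 */
static void __maybe_unused example_count_ops(void)
{
	struct ftrace_ops *op;
	int n = 0;

	/* ops may be dynamically allocated; they are freed only after a
	 * synchronize_rcu(), so disabling preemption pins them. */
	preempt_disable_notrace();
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* note: on an empty list the body still runs once on the
		 * ftrace_list_end stub, so real users test op's fields */
		n++;
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	pr_debug("%d ftrace_ops registered\n", n);
}
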
    297static void ftrace_update_trampoline(struct ftrace_ops *ops);
    298
    299int __register_ftrace_function(struct ftrace_ops *ops)
    300{
    301	if (ops->flags & FTRACE_OPS_FL_DELETED)
    302		return -EINVAL;
    303
    304	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
    305		return -EBUSY;
    306
    307#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
    308	/*
     309	 * If the ftrace_ops specifies SAVE_REGS, then it can only be used
    310	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
    311	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
    312	 */
    313	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
    314	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
    315		return -EINVAL;
    316
    317	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
    318		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
    319#endif
    320	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
    321		return -EBUSY;
    322
    323	if (!is_kernel_core_data((unsigned long)ops))
    324		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
    325
    326	add_ftrace_ops(&ftrace_ops_list, ops);
    327
    328	/* Always save the function, and reset at unregistering */
    329	ops->saved_func = ops->func;
    330
    331	if (ftrace_pids_enabled(ops))
    332		ops->func = ftrace_pid_func;
    333
    334	ftrace_update_trampoline(ops);
    335
    336	if (ftrace_enabled)
    337		update_ftrace_function();
    338
    339	return 0;
    340}
    341
    342int __unregister_ftrace_function(struct ftrace_ops *ops)
    343{
    344	int ret;
    345
    346	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
    347		return -EBUSY;
    348
    349	ret = remove_ftrace_ops(&ftrace_ops_list, ops);
    350
    351	if (ret < 0)
    352		return ret;
    353
    354	if (ftrace_enabled)
    355		update_ftrace_function();
    356
    357	ops->func = ops->saved_func;
    358
    359	return 0;
    360}
    361
    362static void ftrace_update_pid_func(void)
    363{
    364	struct ftrace_ops *op;
    365
    366	/* Only do something if we are tracing something */
    367	if (ftrace_trace_function == ftrace_stub)
    368		return;
    369
    370	do_for_each_ftrace_op(op, ftrace_ops_list) {
    371		if (op->flags & FTRACE_OPS_FL_PID) {
    372			op->func = ftrace_pids_enabled(op) ?
    373				ftrace_pid_func : op->saved_func;
    374			ftrace_update_trampoline(op);
    375		}
    376	} while_for_each_ftrace_op(op);
    377
    378	update_ftrace_function();
    379}
    380
    381#ifdef CONFIG_FUNCTION_PROFILER
    382struct ftrace_profile {
    383	struct hlist_node		node;
    384	unsigned long			ip;
    385	unsigned long			counter;
    386#ifdef CONFIG_FUNCTION_GRAPH_TRACER
    387	unsigned long long		time;
    388	unsigned long long		time_squared;
    389#endif
    390};
    391
    392struct ftrace_profile_page {
    393	struct ftrace_profile_page	*next;
    394	unsigned long			index;
    395	struct ftrace_profile		records[];
    396};
    397
    398struct ftrace_profile_stat {
    399	atomic_t			disabled;
    400	struct hlist_head		*hash;
    401	struct ftrace_profile_page	*pages;
    402	struct ftrace_profile_page	*start;
    403	struct tracer_stat		stat;
    404};
    405
    406#define PROFILE_RECORDS_SIZE						\
    407	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
    408
    409#define PROFILES_PER_PAGE					\
    410	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
    411
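/*
 * [Editor's note] Worked numbers, assuming a 64-bit build with 4 KiB
 * pages and CONFIG_FUNCTION_GRAPH_TRACER=y: struct ftrace_profile is
 * 48 bytes (16 for the hlist_node plus 4 * 8), the page header takes
 * 16 bytes, so PROFILE_RECORDS_SIZE = 4096 - 16 = 4080 and
 * PROFILES_PER_PAGE = 4080 / 48 = 85. Profiling ~20000 functions then
 * needs DIV_ROUND_UP(20000, 85) = 236 pages (~944 KiB) per CPU.
 */
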
    412static int ftrace_profile_enabled __read_mostly;
    413
    414/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
    415static DEFINE_MUTEX(ftrace_profile_lock);
    416
    417static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
    418
    419#define FTRACE_PROFILE_HASH_BITS 10
    420#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
    421
    422static void *
    423function_stat_next(void *v, int idx)
    424{
    425	struct ftrace_profile *rec = v;
    426	struct ftrace_profile_page *pg;
    427
    428	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
    429
    430 again:
    431	if (idx != 0)
    432		rec++;
    433
    434	if ((void *)rec >= (void *)&pg->records[pg->index]) {
    435		pg = pg->next;
    436		if (!pg)
    437			return NULL;
    438		rec = &pg->records[0];
    439		if (!rec->counter)
    440			goto again;
    441	}
    442
    443	return rec;
    444}
    445
    446static void *function_stat_start(struct tracer_stat *trace)
    447{
    448	struct ftrace_profile_stat *stat =
    449		container_of(trace, struct ftrace_profile_stat, stat);
    450
    451	if (!stat || !stat->start)
    452		return NULL;
    453
    454	return function_stat_next(&stat->start->records[0], 0);
    455}
    456
    457#ifdef CONFIG_FUNCTION_GRAPH_TRACER
    458/* function graph compares on total time */
    459static int function_stat_cmp(const void *p1, const void *p2)
    460{
    461	const struct ftrace_profile *a = p1;
    462	const struct ftrace_profile *b = p2;
    463
    464	if (a->time < b->time)
    465		return -1;
    466	if (a->time > b->time)
    467		return 1;
    468	else
    469		return 0;
    470}
    471#else
     472/* not function graph: compares against hits */
    473static int function_stat_cmp(const void *p1, const void *p2)
    474{
    475	const struct ftrace_profile *a = p1;
    476	const struct ftrace_profile *b = p2;
    477
    478	if (a->counter < b->counter)
    479		return -1;
    480	if (a->counter > b->counter)
    481		return 1;
    482	else
    483		return 0;
    484}
    485#endif
    486
    487static int function_stat_headers(struct seq_file *m)
    488{
    489#ifdef CONFIG_FUNCTION_GRAPH_TRACER
    490	seq_puts(m, "  Function                               "
    491		 "Hit    Time            Avg             s^2\n"
    492		    "  --------                               "
    493		 "---    ----            ---             ---\n");
    494#else
    495	seq_puts(m, "  Function                               Hit\n"
    496		    "  --------                               ---\n");
    497#endif
    498	return 0;
    499}
    500
    501static int function_stat_show(struct seq_file *m, void *v)
    502{
    503	struct ftrace_profile *rec = v;
    504	char str[KSYM_SYMBOL_LEN];
    505	int ret = 0;
    506#ifdef CONFIG_FUNCTION_GRAPH_TRACER
    507	static struct trace_seq s;
    508	unsigned long long avg;
    509	unsigned long long stddev;
    510#endif
    511	mutex_lock(&ftrace_profile_lock);
    512
    513	/* we raced with function_profile_reset() */
    514	if (unlikely(rec->counter == 0)) {
    515		ret = -EBUSY;
    516		goto out;
    517	}
    518
    519#ifdef CONFIG_FUNCTION_GRAPH_TRACER
    520	avg = div64_ul(rec->time, rec->counter);
    521	if (tracing_thresh && (avg < tracing_thresh))
    522		goto out;
    523#endif
    524
    525	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
    526	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
    527
    528#ifdef CONFIG_FUNCTION_GRAPH_TRACER
    529	seq_puts(m, "    ");
    530
     531	/* Sample variance (s^2) */
    532	if (rec->counter <= 1)
    533		stddev = 0;
    534	else {
    535		/*
     536		 * Compute the sample variance (textbook shortcut form):
    537		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
    538		 */
    539		stddev = rec->counter * rec->time_squared -
    540			 rec->time * rec->time;
    541
    542		/*
     543		 * Divide by only 1000 here for the ns^2 -> us^2 conversion;
     544		 * trace_print_graph_duration() will divide by 1000 again.
    545		 */
    546		stddev = div64_ul(stddev,
    547				  rec->counter * (rec->counter - 1) * 1000);
    548	}
    549
    550	trace_seq_init(&s);
    551	trace_print_graph_duration(rec->time, &s);
    552	trace_seq_puts(&s, "    ");
    553	trace_print_graph_duration(avg, &s);
    554	trace_seq_puts(&s, "    ");
    555	trace_print_graph_duration(stddev, &s);
    556	trace_print_seq(m, &s);
    557#endif
    558	seq_putc(m, '\n');
    559out:
    560	mutex_unlock(&ftrace_profile_lock);
    561
    562	return ret;
    563}
    564
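/*
 * [Editor's note] Worked example for the variance math above: four hits
 * with times 100, 200, 300 and 400 ns give n = 4, \Sum x = 1000 and
 * \Sum x^2 = 300000, so n * \Sum x^2 - (\Sum x)^2 = 200000 ns^2 and
 * s^2 = 200000 / (4 * 3) ~= 16667 ns^2. The code divides by
 * (4 * 3 * 1000) = 12000 instead, storing 16, and
 * trace_print_graph_duration() divides by 1000 once more, printing
 * 0.016 (us^2).
 */
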
    565static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
    566{
    567	struct ftrace_profile_page *pg;
    568
    569	pg = stat->pages = stat->start;
    570
    571	while (pg) {
    572		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
    573		pg->index = 0;
    574		pg = pg->next;
    575	}
    576
    577	memset(stat->hash, 0,
    578	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
    579}
    580
    581static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
    582{
    583	struct ftrace_profile_page *pg;
    584	int functions;
    585	int pages;
    586	int i;
    587
    588	/* If we already allocated, do nothing */
    589	if (stat->pages)
    590		return 0;
    591
    592	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
    593	if (!stat->pages)
    594		return -ENOMEM;
    595
    596#ifdef CONFIG_DYNAMIC_FTRACE
    597	functions = ftrace_update_tot_cnt;
    598#else
    599	/*
    600	 * We do not know the number of functions that exist because
     601	 * dynamic tracing is what counts them. From past experience,
     602	 * there are around 20K functions. That should be more than enough.
    603	 * It is highly unlikely we will execute every function in
    604	 * the kernel.
    605	 */
    606	functions = 20000;
    607#endif
    608
    609	pg = stat->start = stat->pages;
    610
    611	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
    612
    613	for (i = 1; i < pages; i++) {
    614		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
    615		if (!pg->next)
    616			goto out_free;
    617		pg = pg->next;
    618	}
    619
    620	return 0;
    621
    622 out_free:
    623	pg = stat->start;
    624	while (pg) {
    625		unsigned long tmp = (unsigned long)pg;
    626
    627		pg = pg->next;
    628		free_page(tmp);
    629	}
    630
    631	stat->pages = NULL;
    632	stat->start = NULL;
    633
    634	return -ENOMEM;
    635}
    636
    637static int ftrace_profile_init_cpu(int cpu)
    638{
    639	struct ftrace_profile_stat *stat;
    640	int size;
    641
    642	stat = &per_cpu(ftrace_profile_stats, cpu);
    643
    644	if (stat->hash) {
    645		/* If the profile is already created, simply reset it */
    646		ftrace_profile_reset(stat);
    647		return 0;
    648	}
    649
    650	/*
    651	 * We are profiling all functions, but usually only a few thousand
    652	 * functions are hit. We'll make a hash of 1024 items.
    653	 */
    654	size = FTRACE_PROFILE_HASH_SIZE;
    655
    656	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
    657
    658	if (!stat->hash)
    659		return -ENOMEM;
    660
    661	/* Preallocate the function profiling pages */
    662	if (ftrace_profile_pages_init(stat) < 0) {
    663		kfree(stat->hash);
    664		stat->hash = NULL;
    665		return -ENOMEM;
    666	}
    667
    668	return 0;
    669}
    670
    671static int ftrace_profile_init(void)
    672{
    673	int cpu;
    674	int ret = 0;
    675
    676	for_each_possible_cpu(cpu) {
    677		ret = ftrace_profile_init_cpu(cpu);
    678		if (ret)
    679			break;
    680	}
    681
    682	return ret;
    683}
    684
    685/* interrupts must be disabled */
    686static struct ftrace_profile *
    687ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
    688{
    689	struct ftrace_profile *rec;
    690	struct hlist_head *hhd;
    691	unsigned long key;
    692
    693	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
    694	hhd = &stat->hash[key];
    695
    696	if (hlist_empty(hhd))
    697		return NULL;
    698
    699	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
    700		if (rec->ip == ip)
    701			return rec;
    702	}
    703
    704	return NULL;
    705}
    706
    707static void ftrace_add_profile(struct ftrace_profile_stat *stat,
    708			       struct ftrace_profile *rec)
    709{
    710	unsigned long key;
    711
    712	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
    713	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
    714}
    715
    716/*
     717 * The memory is already allocated; this simply finds a new record to use.
    718 */
    719static struct ftrace_profile *
    720ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
    721{
    722	struct ftrace_profile *rec = NULL;
    723
    724	/* prevent recursion (from NMIs) */
    725	if (atomic_inc_return(&stat->disabled) != 1)
    726		goto out;
    727
    728	/*
    729	 * Try to find the function again since an NMI
    730	 * could have added it
    731	 */
    732	rec = ftrace_find_profiled_func(stat, ip);
    733	if (rec)
    734		goto out;
    735
    736	if (stat->pages->index == PROFILES_PER_PAGE) {
    737		if (!stat->pages->next)
    738			goto out;
    739		stat->pages = stat->pages->next;
    740	}
    741
    742	rec = &stat->pages->records[stat->pages->index++];
    743	rec->ip = ip;
    744	ftrace_add_profile(stat, rec);
    745
    746 out:
    747	atomic_dec(&stat->disabled);
    748
    749	return rec;
    750}
    751
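/*
 * [Editor's note] The atomic_inc_return() above is a re-entrancy guard
 * rather than a lock: if an NMI fires inside this allocator and its
 * handler re-enters, the nested call sees a count != 1 and backs off,
 * dropping that one profile hit instead of corrupting the page list.
 * The NMI case is also why the function re-checks
 * ftrace_find_profiled_func() before allocating.
 */
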
    752static void
    753function_profile_call(unsigned long ip, unsigned long parent_ip,
    754		      struct ftrace_ops *ops, struct ftrace_regs *fregs)
    755{
    756	struct ftrace_profile_stat *stat;
    757	struct ftrace_profile *rec;
    758	unsigned long flags;
    759
    760	if (!ftrace_profile_enabled)
    761		return;
    762
    763	local_irq_save(flags);
    764
    765	stat = this_cpu_ptr(&ftrace_profile_stats);
    766	if (!stat->hash || !ftrace_profile_enabled)
    767		goto out;
    768
    769	rec = ftrace_find_profiled_func(stat, ip);
    770	if (!rec) {
    771		rec = ftrace_profile_alloc(stat, ip);
    772		if (!rec)
    773			goto out;
    774	}
    775
    776	rec->counter++;
    777 out:
    778	local_irq_restore(flags);
    779}
    780
    781#ifdef CONFIG_FUNCTION_GRAPH_TRACER
    782static bool fgraph_graph_time = true;
    783
    784void ftrace_graph_graph_time_control(bool enable)
    785{
    786	fgraph_graph_time = enable;
    787}
    788
    789static int profile_graph_entry(struct ftrace_graph_ent *trace)
    790{
    791	struct ftrace_ret_stack *ret_stack;
    792
    793	function_profile_call(trace->func, 0, NULL, NULL);
    794
    795	/* If function graph is shutting down, ret_stack can be NULL */
    796	if (!current->ret_stack)
    797		return 0;
    798
    799	ret_stack = ftrace_graph_get_ret_stack(current, 0);
    800	if (ret_stack)
    801		ret_stack->subtime = 0;
    802
    803	return 1;
    804}
    805
    806static void profile_graph_return(struct ftrace_graph_ret *trace)
    807{
    808	struct ftrace_ret_stack *ret_stack;
    809	struct ftrace_profile_stat *stat;
    810	unsigned long long calltime;
    811	struct ftrace_profile *rec;
    812	unsigned long flags;
    813
    814	local_irq_save(flags);
    815	stat = this_cpu_ptr(&ftrace_profile_stats);
    816	if (!stat->hash || !ftrace_profile_enabled)
    817		goto out;
    818
     819	/* If the calltime was zero'd, ignore it */
    820	if (!trace->calltime)
    821		goto out;
    822
    823	calltime = trace->rettime - trace->calltime;
    824
    825	if (!fgraph_graph_time) {
    826
    827		/* Append this call time to the parent time to subtract */
    828		ret_stack = ftrace_graph_get_ret_stack(current, 1);
    829		if (ret_stack)
    830			ret_stack->subtime += calltime;
    831
    832		ret_stack = ftrace_graph_get_ret_stack(current, 0);
    833		if (ret_stack && ret_stack->subtime < calltime)
    834			calltime -= ret_stack->subtime;
    835		else
    836			calltime = 0;
    837	}
    838
    839	rec = ftrace_find_profiled_func(stat, trace->func);
    840	if (rec) {
    841		rec->time += calltime;
    842		rec->time_squared += calltime * calltime;
    843	}
    844
    845 out:
    846	local_irq_restore(flags);
    847}
    848
    849static struct fgraph_ops fprofiler_ops = {
    850	.entryfunc = &profile_graph_entry,
    851	.retfunc = &profile_graph_return,
    852};
    853
    854static int register_ftrace_profiler(void)
    855{
    856	return register_ftrace_graph(&fprofiler_ops);
    857}
    858
    859static void unregister_ftrace_profiler(void)
    860{
    861	unregister_ftrace_graph(&fprofiler_ops);
    862}
    863#else
    864static struct ftrace_ops ftrace_profile_ops __read_mostly = {
    865	.func		= function_profile_call,
    866	.flags		= FTRACE_OPS_FL_INITIALIZED,
    867	INIT_OPS_HASH(ftrace_profile_ops)
    868};
    869
    870static int register_ftrace_profiler(void)
    871{
    872	return register_ftrace_function(&ftrace_profile_ops);
    873}
    874
    875static void unregister_ftrace_profiler(void)
    876{
    877	unregister_ftrace_function(&ftrace_profile_ops);
    878}
    879#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
    880
    881static ssize_t
    882ftrace_profile_write(struct file *filp, const char __user *ubuf,
    883		     size_t cnt, loff_t *ppos)
    884{
    885	unsigned long val;
    886	int ret;
    887
    888	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
    889	if (ret)
    890		return ret;
    891
    892	val = !!val;
    893
    894	mutex_lock(&ftrace_profile_lock);
    895	if (ftrace_profile_enabled ^ val) {
    896		if (val) {
    897			ret = ftrace_profile_init();
    898			if (ret < 0) {
    899				cnt = ret;
    900				goto out;
    901			}
    902
    903			ret = register_ftrace_profiler();
    904			if (ret < 0) {
    905				cnt = ret;
    906				goto out;
    907			}
    908			ftrace_profile_enabled = 1;
    909		} else {
    910			ftrace_profile_enabled = 0;
    911			/*
     912			 * unregister_ftrace_profiler() calls stop_machine(),
     913			 * so this acts like a synchronize_rcu().
    914			 */
    915			unregister_ftrace_profiler();
    916		}
    917	}
    918 out:
    919	mutex_unlock(&ftrace_profile_lock);
    920
    921	*ppos += cnt;
    922
    923	return cnt;
    924}
    925
    926static ssize_t
    927ftrace_profile_read(struct file *filp, char __user *ubuf,
    928		     size_t cnt, loff_t *ppos)
    929{
    930	char buf[64];		/* big enough to hold a number */
    931	int r;
    932
    933	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
    934	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
    935}
    936
    937static const struct file_operations ftrace_profile_fops = {
    938	.open		= tracing_open_generic,
    939	.read		= ftrace_profile_read,
    940	.write		= ftrace_profile_write,
    941	.llseek		= default_llseek,
    942};
    943
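/*
 * [Editor's note] Typical usage from tracefs (usually mounted at
 * /sys/kernel/tracing): writing 1 to function_profile_enabled starts
 * the profiler, and the per-CPU results appear as
 * trace_stat/function0 ... function<N>, sorted by the stat_cmp
 * callback registered below.
 */
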
    944/* used to initialize the real stat files */
    945static struct tracer_stat function_stats __initdata = {
    946	.name		= "functions",
    947	.stat_start	= function_stat_start,
    948	.stat_next	= function_stat_next,
    949	.stat_cmp	= function_stat_cmp,
    950	.stat_headers	= function_stat_headers,
    951	.stat_show	= function_stat_show
    952};
    953
    954static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
    955{
    956	struct ftrace_profile_stat *stat;
    957	char *name;
    958	int ret;
    959	int cpu;
    960
    961	for_each_possible_cpu(cpu) {
    962		stat = &per_cpu(ftrace_profile_stats, cpu);
    963
    964		name = kasprintf(GFP_KERNEL, "function%d", cpu);
    965		if (!name) {
    966			/*
     967			 * The files created are permanent; if something goes
     968			 * wrong, we still do not free the memory.
    969			 */
    970			WARN(1,
    971			     "Could not allocate stat file for cpu %d\n",
    972			     cpu);
    973			return;
    974		}
    975		stat->stat = function_stats;
    976		stat->stat.name = name;
    977		ret = register_stat_tracer(&stat->stat);
    978		if (ret) {
    979			WARN(1,
    980			     "Could not register function stat for cpu %d\n",
    981			     cpu);
    982			kfree(name);
    983			return;
    984		}
    985	}
    986
    987	trace_create_file("function_profile_enabled",
    988			  TRACE_MODE_WRITE, d_tracer, NULL,
    989			  &ftrace_profile_fops);
    990}
    991
    992#else /* CONFIG_FUNCTION_PROFILER */
    993static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
    994{
    995}
    996#endif /* CONFIG_FUNCTION_PROFILER */
    997
    998#ifdef CONFIG_DYNAMIC_FTRACE
    999
   1000static struct ftrace_ops *removed_ops;
   1001
   1002/*
   1003 * Set when doing a global update, like enabling all recs or disabling them.
   1004 * It is not set when just updating a single ftrace_ops.
   1005 */
   1006static bool update_all_ops;
   1007
   1008#ifndef CONFIG_FTRACE_MCOUNT_RECORD
   1009# error Dynamic ftrace depends on MCOUNT_RECORD
   1010#endif
   1011
   1012struct ftrace_func_probe {
   1013	struct ftrace_probe_ops	*probe_ops;
   1014	struct ftrace_ops	ops;
   1015	struct trace_array	*tr;
   1016	struct list_head	list;
   1017	void			*data;
   1018	int			ref;
   1019};
   1020
   1021/*
   1022 * We make these constant because no one should touch them,
   1023 * but they are used as the default "empty hash", to avoid allocating
   1024 * it all the time. These are in a read only section such that if
   1025 * anyone does try to modify it, it will cause an exception.
   1026 */
   1027static const struct hlist_head empty_buckets[1];
   1028static const struct ftrace_hash empty_hash = {
   1029	.buckets = (struct hlist_head *)empty_buckets,
   1030};
   1031#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
   1032
   1033struct ftrace_ops global_ops = {
   1034	.func				= ftrace_stub,
   1035	.local_hash.notrace_hash	= EMPTY_HASH,
   1036	.local_hash.filter_hash		= EMPTY_HASH,
   1037	INIT_OPS_HASH(global_ops)
   1038	.flags				= FTRACE_OPS_FL_INITIALIZED |
   1039					  FTRACE_OPS_FL_PID,
   1040};
   1041
   1042/*
   1043 * Used by the stack unwinder to know about dynamic ftrace trampolines.
   1044 */
   1045struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
   1046{
   1047	struct ftrace_ops *op = NULL;
   1048
   1049	/*
   1050	 * Some of the ops may be dynamically allocated,
   1051	 * they are freed after a synchronize_rcu().
   1052	 */
   1053	preempt_disable_notrace();
   1054
   1055	do_for_each_ftrace_op(op, ftrace_ops_list) {
   1056		/*
   1057		 * This is to check for dynamically allocated trampolines.
   1058		 * Trampolines that are in kernel text will have
   1059		 * core_kernel_text() return true.
   1060		 */
   1061		if (op->trampoline && op->trampoline_size)
   1062			if (addr >= op->trampoline &&
   1063			    addr < op->trampoline + op->trampoline_size) {
   1064				preempt_enable_notrace();
   1065				return op;
   1066			}
   1067	} while_for_each_ftrace_op(op);
   1068	preempt_enable_notrace();
   1069
   1070	return NULL;
   1071}
   1072
   1073/*
   1074 * This is used by __kernel_text_address() to return true if the
   1075 * address is on a dynamically allocated trampoline that would
   1076 * not return true for either core_kernel_text() or
   1077 * is_module_text_address().
   1078 */
   1079bool is_ftrace_trampoline(unsigned long addr)
   1080{
   1081	return ftrace_ops_trampoline(addr) != NULL;
   1082}
   1083
   1084struct ftrace_page {
   1085	struct ftrace_page	*next;
   1086	struct dyn_ftrace	*records;
   1087	int			index;
   1088	int			order;
   1089};
   1090
   1091#define ENTRY_SIZE sizeof(struct dyn_ftrace)
   1092#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
   1093
   1094static struct ftrace_page	*ftrace_pages_start;
   1095static struct ftrace_page	*ftrace_pages;
   1096
   1097static __always_inline unsigned long
   1098ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
   1099{
   1100	if (hash->size_bits > 0)
   1101		return hash_long(ip, hash->size_bits);
   1102
   1103	return 0;
   1104}
   1105
   1106/* Only use this function if ftrace_hash_empty() has already been tested */
   1107static __always_inline struct ftrace_func_entry *
   1108__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
   1109{
   1110	unsigned long key;
   1111	struct ftrace_func_entry *entry;
   1112	struct hlist_head *hhd;
   1113
   1114	key = ftrace_hash_key(hash, ip);
   1115	hhd = &hash->buckets[key];
   1116
   1117	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
   1118		if (entry->ip == ip)
   1119			return entry;
   1120	}
   1121	return NULL;
   1122}
   1123
   1124/**
   1125 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
   1126 * @hash: The hash to look at
   1127 * @ip: The instruction pointer to test
   1128 *
   1129 * Search a given @hash to see if a given instruction pointer (@ip)
   1130 * exists in it.
   1131 *
   1132 * Returns the entry that holds the @ip if found. NULL otherwise.
   1133 */
   1134struct ftrace_func_entry *
   1135ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
   1136{
   1137	if (ftrace_hash_empty(hash))
   1138		return NULL;
   1139
   1140	return __ftrace_lookup_ip(hash, ip);
   1141}
   1142
   1143static void __add_hash_entry(struct ftrace_hash *hash,
   1144			     struct ftrace_func_entry *entry)
   1145{
   1146	struct hlist_head *hhd;
   1147	unsigned long key;
   1148
   1149	key = ftrace_hash_key(hash, entry->ip);
   1150	hhd = &hash->buckets[key];
   1151	hlist_add_head(&entry->hlist, hhd);
   1152	hash->count++;
   1153}
   1154
   1155static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
   1156{
   1157	struct ftrace_func_entry *entry;
   1158
   1159	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
   1160	if (!entry)
   1161		return -ENOMEM;
   1162
   1163	entry->ip = ip;
   1164	__add_hash_entry(hash, entry);
   1165
   1166	return 0;
   1167}
   1168
   1169static void
   1170free_hash_entry(struct ftrace_hash *hash,
   1171		  struct ftrace_func_entry *entry)
   1172{
   1173	hlist_del(&entry->hlist);
   1174	kfree(entry);
   1175	hash->count--;
   1176}
   1177
   1178static void
   1179remove_hash_entry(struct ftrace_hash *hash,
   1180		  struct ftrace_func_entry *entry)
   1181{
   1182	hlist_del_rcu(&entry->hlist);
   1183	hash->count--;
   1184}
   1185
   1186static void ftrace_hash_clear(struct ftrace_hash *hash)
   1187{
   1188	struct hlist_head *hhd;
   1189	struct hlist_node *tn;
   1190	struct ftrace_func_entry *entry;
   1191	int size = 1 << hash->size_bits;
   1192	int i;
   1193
   1194	if (!hash->count)
   1195		return;
   1196
   1197	for (i = 0; i < size; i++) {
   1198		hhd = &hash->buckets[i];
   1199		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
   1200			free_hash_entry(hash, entry);
   1201	}
   1202	FTRACE_WARN_ON(hash->count);
   1203}
   1204
   1205static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
   1206{
   1207	list_del(&ftrace_mod->list);
   1208	kfree(ftrace_mod->module);
   1209	kfree(ftrace_mod->func);
   1210	kfree(ftrace_mod);
   1211}
   1212
   1213static void clear_ftrace_mod_list(struct list_head *head)
   1214{
   1215	struct ftrace_mod_load *p, *n;
   1216
   1217	/* stack tracer isn't supported yet */
   1218	if (!head)
   1219		return;
   1220
   1221	mutex_lock(&ftrace_lock);
   1222	list_for_each_entry_safe(p, n, head, list)
   1223		free_ftrace_mod(p);
   1224	mutex_unlock(&ftrace_lock);
   1225}
   1226
   1227static void free_ftrace_hash(struct ftrace_hash *hash)
   1228{
   1229	if (!hash || hash == EMPTY_HASH)
   1230		return;
   1231	ftrace_hash_clear(hash);
   1232	kfree(hash->buckets);
   1233	kfree(hash);
   1234}
   1235
   1236static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
   1237{
   1238	struct ftrace_hash *hash;
   1239
   1240	hash = container_of(rcu, struct ftrace_hash, rcu);
   1241	free_ftrace_hash(hash);
   1242}
   1243
   1244static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
   1245{
   1246	if (!hash || hash == EMPTY_HASH)
   1247		return;
   1248	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
   1249}
   1250
   1251void ftrace_free_filter(struct ftrace_ops *ops)
   1252{
   1253	ftrace_ops_init(ops);
   1254	free_ftrace_hash(ops->func_hash->filter_hash);
   1255	free_ftrace_hash(ops->func_hash->notrace_hash);
   1256}
   1257
   1258static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
   1259{
   1260	struct ftrace_hash *hash;
   1261	int size;
   1262
   1263	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
   1264	if (!hash)
   1265		return NULL;
   1266
   1267	size = 1 << size_bits;
   1268	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
   1269
   1270	if (!hash->buckets) {
   1271		kfree(hash);
   1272		return NULL;
   1273	}
   1274
   1275	hash->size_bits = size_bits;
   1276
   1277	return hash;
   1278}
   1279
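/*
 * [Editor's note] A minimal usage sketch of the hash primitives above
 * (hypothetical helper, for illustration only):
 */
static int __maybe_unused example_hash_usage(unsigned long ip)
{
	struct ftrace_hash *hash;
	bool found;

	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
	if (!hash)
		return -ENOMEM;

	if (add_hash_entry(hash, ip) < 0) {
		free_ftrace_hash(hash);
		return -ENOMEM;
	}

	/* hashes @ip with hash_long() and walks the matching bucket */
	found = ftrace_lookup_ip(hash, ip) != NULL;

	free_ftrace_hash(hash);
	return found ? 0 : -ENOENT;
}
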
   1280
   1281static int ftrace_add_mod(struct trace_array *tr,
   1282			  const char *func, const char *module,
   1283			  int enable)
   1284{
   1285	struct ftrace_mod_load *ftrace_mod;
   1286	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
   1287
   1288	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
   1289	if (!ftrace_mod)
   1290		return -ENOMEM;
   1291
   1292	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
   1293	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
   1294	ftrace_mod->enable = enable;
   1295
   1296	if (!ftrace_mod->func || !ftrace_mod->module)
   1297		goto out_free;
   1298
   1299	list_add(&ftrace_mod->list, mod_head);
   1300
   1301	return 0;
   1302
   1303 out_free:
   1304	free_ftrace_mod(ftrace_mod);
   1305
   1306	return -ENOMEM;
   1307}
   1308
   1309static struct ftrace_hash *
   1310alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
   1311{
   1312	struct ftrace_func_entry *entry;
   1313	struct ftrace_hash *new_hash;
   1314	int size;
   1315	int ret;
   1316	int i;
   1317
   1318	new_hash = alloc_ftrace_hash(size_bits);
   1319	if (!new_hash)
   1320		return NULL;
   1321
   1322	if (hash)
   1323		new_hash->flags = hash->flags;
   1324
   1325	/* Empty hash? */
   1326	if (ftrace_hash_empty(hash))
   1327		return new_hash;
   1328
   1329	size = 1 << hash->size_bits;
   1330	for (i = 0; i < size; i++) {
   1331		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
   1332			ret = add_hash_entry(new_hash, entry->ip);
   1333			if (ret < 0)
   1334				goto free_hash;
   1335		}
   1336	}
   1337
   1338	FTRACE_WARN_ON(new_hash->count != hash->count);
   1339
   1340	return new_hash;
   1341
   1342 free_hash:
   1343	free_ftrace_hash(new_hash);
   1344	return NULL;
   1345}
   1346
   1347static void
   1348ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
   1349static void
   1350ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
   1351
   1352static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
   1353				       struct ftrace_hash *new_hash);
   1354
   1355static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
   1356{
   1357	struct ftrace_func_entry *entry;
   1358	struct ftrace_hash *new_hash;
   1359	struct hlist_head *hhd;
   1360	struct hlist_node *tn;
   1361	int bits = 0;
   1362	int i;
   1363
   1364	/*
   1365	 * Use around half the size (max bit of it), but
   1366	 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
   1367	 */
   1368	bits = fls(size / 2);
   1369
   1370	/* Don't allocate too much */
   1371	if (bits > FTRACE_HASH_MAX_BITS)
   1372		bits = FTRACE_HASH_MAX_BITS;
   1373
   1374	new_hash = alloc_ftrace_hash(bits);
   1375	if (!new_hash)
   1376		return NULL;
   1377
   1378	new_hash->flags = src->flags;
   1379
   1380	size = 1 << src->size_bits;
   1381	for (i = 0; i < size; i++) {
   1382		hhd = &src->buckets[i];
   1383		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
   1384			remove_hash_entry(src, entry);
   1385			__add_hash_entry(new_hash, entry);
   1386		}
   1387	}
   1388	return new_hash;
   1389}
   1390
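/*
 * [Editor's note] Worked numbers for the sizing above: src->count = 100
 * gives bits = fls(50) = 6, i.e. 64 buckets (~1.6 entries each), while
 * src->count = 10000 gives fls(5000) = 13, capped to
 * FTRACE_HASH_MAX_BITS = 12, i.e. at most 4096 buckets.
 */
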
   1391static struct ftrace_hash *
   1392__ftrace_hash_move(struct ftrace_hash *src)
   1393{
   1394	int size = src->count;
   1395
   1396	/*
   1397	 * If the new source is empty, just return the empty_hash.
   1398	 */
   1399	if (ftrace_hash_empty(src))
   1400		return EMPTY_HASH;
   1401
   1402	return dup_hash(src, size);
   1403}
   1404
   1405static int
   1406ftrace_hash_move(struct ftrace_ops *ops, int enable,
   1407		 struct ftrace_hash **dst, struct ftrace_hash *src)
   1408{
   1409	struct ftrace_hash *new_hash;
   1410	int ret;
   1411
   1412	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
   1413	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
   1414		return -EINVAL;
   1415
   1416	new_hash = __ftrace_hash_move(src);
   1417	if (!new_hash)
   1418		return -ENOMEM;
   1419
   1420	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
   1421	if (enable) {
   1422		/* IPMODIFY should be updated only when filter_hash updating */
   1423		ret = ftrace_hash_ipmodify_update(ops, new_hash);
   1424		if (ret < 0) {
   1425			free_ftrace_hash(new_hash);
   1426			return ret;
   1427		}
   1428	}
   1429
   1430	/*
   1431	 * Remove the current set, update the hash and add
   1432	 * them back.
   1433	 */
   1434	ftrace_hash_rec_disable_modify(ops, enable);
   1435
   1436	rcu_assign_pointer(*dst, new_hash);
   1437
   1438	ftrace_hash_rec_enable_modify(ops, enable);
   1439
   1440	return 0;
   1441}
   1442
   1443static bool hash_contains_ip(unsigned long ip,
   1444			     struct ftrace_ops_hash *hash)
   1445{
   1446	/*
   1447	 * The function record is a match if it exists in the filter
   1448	 * hash and not in the notrace hash. Note, an empty hash is
   1449	 * considered a match for the filter hash, but an empty
   1450	 * notrace hash is considered not in the notrace hash.
   1451	 */
   1452	return (ftrace_hash_empty(hash->filter_hash) ||
   1453		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
   1454		(ftrace_hash_empty(hash->notrace_hash) ||
   1455		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
   1456}
   1457
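/*
 * [Editor's note] The matching rules above, as a table ("lacks ip"
 * means a non-empty hash without the ip):
 *
 *	filter_hash	notrace_hash	hash_contains_ip()
 *	-----------	------------	------------------
 *	empty		empty		true (trace everything)
 *	contains ip	empty		true
 *	contains ip	lacks ip	true
 *	contains ip	contains ip	false
 *	empty		contains ip	false
 *	lacks ip	(any)		false
 */
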
   1458/*
   1459 * Test the hashes for this ops to see if we want to call
   1460 * the ops->func or not.
   1461 *
   1462 * It's a match if the ip is in the ops->filter_hash or
   1463 * the filter_hash does not exist or is empty,
   1464 *  AND
   1465 * the ip is not in the ops->notrace_hash.
   1466 *
   1467 * This needs to be called with preemption disabled as
   1468 * the hashes are freed with call_rcu().
   1469 */
   1470int
   1471ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
   1472{
   1473	struct ftrace_ops_hash hash;
   1474	int ret;
   1475
   1476#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
   1477	/*
   1478	 * There's a small race when adding ops that the ftrace handler
   1479	 * that wants regs, may be called without them. We can not
   1480	 * allow that handler to be called if regs is NULL.
   1481	 */
   1482	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
   1483		return 0;
   1484#endif
   1485
   1486	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
   1487	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
   1488
   1489	if (hash_contains_ip(ip, &hash))
   1490		ret = 1;
   1491	else
   1492		ret = 0;
   1493
   1494	return ret;
   1495}
   1496
   1497/*
    1498 * This is a double for loop. Do not use 'break' to break out of it;
    1499 * you must use a goto, as in the sketch below.
   1500 */
   1501#define do_for_each_ftrace_rec(pg, rec)					\
   1502	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
   1503		int _____i;						\
   1504		for (_____i = 0; _____i < pg->index; _____i++) {	\
   1505			rec = &pg->records[_____i];
   1506
   1507#define while_for_each_ftrace_rec()		\
   1508		}				\
   1509	}
   1510
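/*
 * [Editor's note] A sketch of correct usage of the macros above
 * (hypothetical helper; real callers hold ftrace_lock while walking
 * the records):
 */
static struct dyn_ftrace * __maybe_unused example_find_rec(unsigned long ip)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;

	do_for_each_ftrace_rec(pg, rec) {
		if (rec->ip == ip)
			goto found;	/* 'break' would only leave the inner loop */
	} while_for_each_ftrace_rec();
	return NULL;

found:
	return rec;
}
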
   1511
   1512static int ftrace_cmp_recs(const void *a, const void *b)
   1513{
   1514	const struct dyn_ftrace *key = a;
   1515	const struct dyn_ftrace *rec = b;
   1516
   1517	if (key->flags < rec->ip)
   1518		return -1;
   1519	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
   1520		return 1;
   1521	return 0;
   1522}
   1523
   1524static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
   1525{
   1526	struct ftrace_page *pg;
   1527	struct dyn_ftrace *rec = NULL;
   1528	struct dyn_ftrace key;
   1529
   1530	key.ip = start;
   1531	key.flags = end;	/* overload flags, as it is unsigned long */
   1532
   1533	for (pg = ftrace_pages_start; pg; pg = pg->next) {
   1534		if (end < pg->records[0].ip ||
   1535		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
   1536			continue;
   1537		rec = bsearch(&key, pg->records, pg->index,
   1538			      sizeof(struct dyn_ftrace),
   1539			      ftrace_cmp_recs);
   1540		if (rec)
   1541			break;
   1542	}
   1543	return rec;
   1544}
   1545
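/*
 * [Editor's note] lookup_rec() overloads struct dyn_ftrace as a search
 * key: key.ip holds the range start and key.flags the range end, and
 * ftrace_cmp_recs() treats a record as "equal" when [key.ip, key.flags]
 * overlaps [rec->ip, rec->ip + MCOUNT_INSN_SIZE). E.g. on x86-64, where
 * MCOUNT_INSN_SIZE is 5, a record at rec->ip = 0x1000 is found by any
 * query range touching 0x1000..0x1004, which is what makes sym+0 and
 * mid-instruction lookups in ftrace_location() work.
 */
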
   1546/**
   1547 * ftrace_location_range - return the first address of a traced location
   1548 *	if it touches the given ip range
   1549 * @start: start of range to search.
   1550 * @end: end of range to search (inclusive). @end points to the last byte
   1551 *	to check.
   1552 *
    1553 * Returns rec->ip if the related ftrace location is at least partly within
   1554 * the given address range. That is, the first address of the instruction
   1555 * that is either a NOP or call to the function tracer. It checks the ftrace
   1556 * internal tables to determine if the address belongs or not.
   1557 */
   1558unsigned long ftrace_location_range(unsigned long start, unsigned long end)
   1559{
   1560	struct dyn_ftrace *rec;
   1561
   1562	rec = lookup_rec(start, end);
   1563	if (rec)
   1564		return rec->ip;
   1565
   1566	return 0;
   1567}
   1568
   1569/**
   1570 * ftrace_location - return the ftrace location
   1571 * @ip: the instruction pointer to check
   1572 *
   1573 * If @ip matches the ftrace location, return @ip.
   1574 * If @ip matches sym+0, return sym's ftrace location.
   1575 * Otherwise, return 0.
   1576 */
   1577unsigned long ftrace_location(unsigned long ip)
   1578{
   1579	struct dyn_ftrace *rec;
   1580	unsigned long offset;
   1581	unsigned long size;
   1582
   1583	rec = lookup_rec(ip, ip);
   1584	if (!rec) {
   1585		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
   1586			goto out;
   1587
   1588		/* map sym+0 to __fentry__ */
   1589		if (!offset)
   1590			rec = lookup_rec(ip, ip + size - 1);
   1591	}
   1592
   1593	if (rec)
   1594		return rec->ip;
   1595
   1596out:
   1597	return 0;
   1598}
   1599
   1600/**
   1601 * ftrace_text_reserved - return true if range contains an ftrace location
   1602 * @start: start of range to search
   1603 * @end: end of range to search (inclusive). @end points to the last byte to check.
   1604 *
    1605 * Returns 1 if the range from @start to @end contains an ftrace location.
   1606 * That is, the instruction that is either a NOP or call to
   1607 * the function tracer. It checks the ftrace internal tables to
   1608 * determine if the address belongs or not.
   1609 */
   1610int ftrace_text_reserved(const void *start, const void *end)
   1611{
   1612	unsigned long ret;
   1613
   1614	ret = ftrace_location_range((unsigned long)start,
   1615				    (unsigned long)end);
   1616
   1617	return (int)!!ret;
   1618}
   1619
   1620/* Test if ops registered to this rec needs regs */
   1621static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
   1622{
   1623	struct ftrace_ops *ops;
   1624	bool keep_regs = false;
   1625
   1626	for (ops = ftrace_ops_list;
   1627	     ops != &ftrace_list_end; ops = ops->next) {
   1628		/* pass rec in as regs to have non-NULL val */
   1629		if (ftrace_ops_test(ops, rec->ip, rec)) {
   1630			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
   1631				keep_regs = true;
   1632				break;
   1633			}
   1634		}
   1635	}
   1636
   1637	return  keep_regs;
   1638}
   1639
   1640static struct ftrace_ops *
   1641ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
   1642static struct ftrace_ops *
   1643ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
   1644static struct ftrace_ops *
   1645ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
   1646
   1647static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
   1648				     int filter_hash,
   1649				     bool inc)
   1650{
   1651	struct ftrace_hash *hash;
   1652	struct ftrace_hash *other_hash;
   1653	struct ftrace_page *pg;
   1654	struct dyn_ftrace *rec;
   1655	bool update = false;
   1656	int count = 0;
   1657	int all = false;
   1658
   1659	/* Only update if the ops has been registered */
   1660	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
   1661		return false;
   1662
   1663	/*
   1664	 * In the filter_hash case:
   1665	 *   If the count is zero, we update all records.
   1666	 *   Otherwise we just update the items in the hash.
   1667	 *
   1668	 * In the notrace_hash case:
   1669	 *   We enable the update in the hash.
   1670	 *   As disabling notrace means enabling the tracing,
   1671	 *   and enabling notrace means disabling, the inc variable
    1672	 *   gets inverted.
   1673	 */
   1674	if (filter_hash) {
   1675		hash = ops->func_hash->filter_hash;
   1676		other_hash = ops->func_hash->notrace_hash;
   1677		if (ftrace_hash_empty(hash))
   1678			all = true;
   1679	} else {
   1680		inc = !inc;
   1681		hash = ops->func_hash->notrace_hash;
   1682		other_hash = ops->func_hash->filter_hash;
   1683		/*
   1684		 * If the notrace hash has no items,
   1685		 * then there's nothing to do.
   1686		 */
   1687		if (ftrace_hash_empty(hash))
   1688			return false;
   1689	}
   1690
   1691	do_for_each_ftrace_rec(pg, rec) {
   1692		int in_other_hash = 0;
   1693		int in_hash = 0;
   1694		int match = 0;
   1695
   1696		if (rec->flags & FTRACE_FL_DISABLED)
   1697			continue;
   1698
   1699		if (all) {
   1700			/*
   1701			 * Only the filter_hash affects all records.
   1702			 * Update if the record is not in the notrace hash.
   1703			 */
   1704			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
   1705				match = 1;
   1706		} else {
   1707			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
   1708			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
   1709
   1710			/*
   1711			 * If filter_hash is set, we want to match all functions
   1712			 * that are in the hash but not in the other hash.
   1713			 *
   1714			 * If filter_hash is not set, then we are decrementing.
   1715			 * That means we match anything that is in the hash
   1716			 * and also in the other_hash. That is, we need to turn
   1717			 * off functions in the other hash because they are disabled
   1718			 * by this hash.
   1719			 */
   1720			if (filter_hash && in_hash && !in_other_hash)
   1721				match = 1;
   1722			else if (!filter_hash && in_hash &&
   1723				 (in_other_hash || ftrace_hash_empty(other_hash)))
   1724				match = 1;
   1725		}
   1726		if (!match)
   1727			continue;
   1728
   1729		if (inc) {
   1730			rec->flags++;
   1731			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
   1732				return false;
   1733
   1734			if (ops->flags & FTRACE_OPS_FL_DIRECT)
   1735				rec->flags |= FTRACE_FL_DIRECT;
   1736
   1737			/*
   1738			 * If there's only a single callback registered to a
   1739			 * function, and the ops has a trampoline registered
   1740			 * for it, then we can call it directly.
   1741			 */
   1742			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
   1743				rec->flags |= FTRACE_FL_TRAMP;
   1744			else
   1745				/*
   1746				 * If we are adding another function callback
   1747				 * to this function, and the previous had a
   1748				 * custom trampoline in use, then we need to go
   1749				 * back to the default trampoline.
   1750				 */
   1751				rec->flags &= ~FTRACE_FL_TRAMP;
   1752
   1753			/*
   1754			 * If any ops wants regs saved for this function
   1755			 * then all ops will get saved regs.
   1756			 */
   1757			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
   1758				rec->flags |= FTRACE_FL_REGS;
   1759		} else {
   1760			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
   1761				return false;
   1762			rec->flags--;
   1763
   1764			/*
   1765			 * Only the internal direct_ops should have the
   1766			 * DIRECT flag set. Thus, if it is removing a
   1767			 * function, then that function should no longer
   1768			 * be direct.
   1769			 */
   1770			if (ops->flags & FTRACE_OPS_FL_DIRECT)
   1771				rec->flags &= ~FTRACE_FL_DIRECT;
   1772
   1773			/*
   1774			 * If the rec had REGS enabled and the ops that is
   1775			 * being removed had REGS set, then see if there is
   1776			 * still any ops for this record that wants regs.
   1777			 * If not, we can stop recording them.
   1778			 */
   1779			if (ftrace_rec_count(rec) > 0 &&
   1780			    rec->flags & FTRACE_FL_REGS &&
   1781			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
   1782				if (!test_rec_ops_needs_regs(rec))
   1783					rec->flags &= ~FTRACE_FL_REGS;
   1784			}
   1785
   1786			/*
   1787			 * The TRAMP needs to be set only if rec count
   1788			 * is decremented to one, and the ops that is
   1789			 * left has a trampoline. As TRAMP can only be
   1790			 * enabled if there is only a single ops attached
   1791			 * to it.
   1792			 */
   1793			if (ftrace_rec_count(rec) == 1 &&
   1794			    ftrace_find_tramp_ops_any_other(rec, ops))
   1795				rec->flags |= FTRACE_FL_TRAMP;
   1796			else
   1797				rec->flags &= ~FTRACE_FL_TRAMP;
   1798
   1799			/*
   1800			 * flags will be cleared in ftrace_check_record()
   1801			 * if rec count is zero.
   1802			 */
   1803		}
   1804		count++;
   1805
   1806		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
   1807		update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;
   1808
   1809		/* Shortcut, if we handled all records, we are done. */
   1810		if (!all && count == hash->count)
   1811			return update;
   1812	} while_for_each_ftrace_rec();
   1813
   1814	return update;
   1815}
   1816
   1817static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
   1818				    int filter_hash)
   1819{
   1820	return __ftrace_hash_rec_update(ops, filter_hash, 0);
   1821}
   1822
   1823static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
   1824				   int filter_hash)
   1825{
   1826	return __ftrace_hash_rec_update(ops, filter_hash, 1);
   1827}
   1828
   1829static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
   1830					  int filter_hash, int inc)
   1831{
   1832	struct ftrace_ops *op;
   1833
   1834	__ftrace_hash_rec_update(ops, filter_hash, inc);
   1835
   1836	if (ops->func_hash != &global_ops.local_hash)
   1837		return;
   1838
   1839	/*
   1840	 * If the ops shares the global_ops hash, then we need to update
   1841	 * all ops that are enabled and use this hash.
   1842	 */
   1843	do_for_each_ftrace_op(op, ftrace_ops_list) {
   1844		/* Already done */
   1845		if (op == ops)
   1846			continue;
   1847		if (op->func_hash == &global_ops.local_hash)
   1848			__ftrace_hash_rec_update(op, filter_hash, inc);
   1849	} while_for_each_ftrace_op(op);
   1850}
   1851
   1852static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
   1853					   int filter_hash)
   1854{
   1855	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
   1856}
   1857
   1858static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
   1859					  int filter_hash)
   1860{
   1861	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
   1862}
   1863
   1864/*
   1865 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
    1866 * or if no update is needed, -EBUSY if it detects a conflict of the flag
    1867 * on an ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
    1868 * Note that old_hash and new_hash have the following meanings:
   1869 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
   1870 *  - If the hash is EMPTY_HASH, it hits nothing
   1871 *  - Anything else hits the recs which match the hash entries.
   1872 */
   1873static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
   1874					 struct ftrace_hash *old_hash,
   1875					 struct ftrace_hash *new_hash)
   1876{
   1877	struct ftrace_page *pg;
   1878	struct dyn_ftrace *rec, *end = NULL;
   1879	int in_old, in_new;
   1880
   1881	/* Only update if the ops has been registered */
   1882	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
   1883		return 0;
   1884
   1885	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
   1886		return 0;
   1887
   1888	/*
    1889	 * Since IPMODIFY is a very address-sensitive action, we do not
    1890	 * allow ftrace_ops to set all functions to the new hash.
   1891	 */
   1892	if (!new_hash || !old_hash)
   1893		return -EINVAL;
   1894
   1895	/* Update rec->flags */
   1896	do_for_each_ftrace_rec(pg, rec) {
   1897
   1898		if (rec->flags & FTRACE_FL_DISABLED)
   1899			continue;
   1900
   1901		/* We need to update only differences of filter_hash */
   1902		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
   1903		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
   1904		if (in_old == in_new)
   1905			continue;
   1906
   1907		if (in_new) {
   1908			/* New entries must ensure no others are using it */
   1909			if (rec->flags & FTRACE_FL_IPMODIFY)
   1910				goto rollback;
   1911			rec->flags |= FTRACE_FL_IPMODIFY;
   1912		} else /* Removed entry */
   1913			rec->flags &= ~FTRACE_FL_IPMODIFY;
   1914	} while_for_each_ftrace_rec();
   1915
   1916	return 0;
   1917
   1918rollback:
   1919	end = rec;
   1920
   1921	/* Roll back what we did above */
   1922	do_for_each_ftrace_rec(pg, rec) {
   1923
   1924		if (rec->flags & FTRACE_FL_DISABLED)
   1925			continue;
   1926
   1927		if (rec == end)
   1928			goto err_out;
   1929
   1930		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
   1931		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
   1932		if (in_old == in_new)
   1933			continue;
   1934
   1935		if (in_new)
   1936			rec->flags &= ~FTRACE_FL_IPMODIFY;
   1937		else
   1938			rec->flags |= FTRACE_FL_IPMODIFY;
   1939	} while_for_each_ftrace_rec();
   1940
   1941err_out:
   1942	return -EBUSY;
   1943}
   1944
   1945static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
   1946{
   1947	struct ftrace_hash *hash = ops->func_hash->filter_hash;
   1948
   1949	if (ftrace_hash_empty(hash))
   1950		hash = NULL;
   1951
   1952	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
   1953}
   1954
   1955/* Disabling always succeeds */
   1956static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
   1957{
   1958	struct ftrace_hash *hash = ops->func_hash->filter_hash;
   1959
   1960	if (ftrace_hash_empty(hash))
   1961		hash = NULL;
   1962
   1963	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
   1964}
   1965
   1966static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
   1967				       struct ftrace_hash *new_hash)
   1968{
   1969	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
   1970
   1971	if (ftrace_hash_empty(old_hash))
   1972		old_hash = NULL;
   1973
   1974	if (ftrace_hash_empty(new_hash))
   1975		new_hash = NULL;
   1976
   1977	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
   1978}
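
        /*
         * A minimal editorial sketch (hypothetical; not part of the original
         * ftrace.c) of the NULL vs EMPTY_HASH convention the three wrappers
         * above rely on: an empty filter is widened to NULL ("hits all
         * recs"), which __ftrace_hash_update_ipmodify() refuses with -EINVAL
         * when the ops is enabled and has IPMODIFY set, and otherwise
         * accepts as a no-op.
         */
        static int __maybe_unused example_ipmodify_hash_convention(struct ftrace_ops *ops)
        {
        	/* EMPTY_HASH -> NULL: from "hits nothing" to "hits all recs" */
        	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, NULL);
        }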
   1979
   1980static void print_ip_ins(const char *fmt, const unsigned char *p)
   1981{
   1982	char ins[MCOUNT_INSN_SIZE];
   1983	int i;
   1984
   1985	if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
   1986		printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
   1987		return;
   1988	}
   1989
   1990	printk(KERN_CONT "%s", fmt);
   1991
   1992	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
   1993		printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]);
   1994}
   1995
   1996enum ftrace_bug_type ftrace_bug_type;
   1997const void *ftrace_expected;
   1998
   1999static void print_bug_type(void)
   2000{
   2001	switch (ftrace_bug_type) {
   2002	case FTRACE_BUG_UNKNOWN:
   2003		break;
   2004	case FTRACE_BUG_INIT:
   2005		pr_info("Initializing ftrace call sites\n");
   2006		break;
   2007	case FTRACE_BUG_NOP:
   2008		pr_info("Setting ftrace call site to NOP\n");
   2009		break;
   2010	case FTRACE_BUG_CALL:
   2011		pr_info("Setting ftrace call site to call ftrace function\n");
   2012		break;
   2013	case FTRACE_BUG_UPDATE:
   2014		pr_info("Updating ftrace call site to call a different ftrace function\n");
   2015		break;
   2016	}
   2017}
   2018
   2019/**
   2020 * ftrace_bug - report and shutdown function tracer
   2021 * @failed: The failed type (EFAULT, EINVAL, EPERM)
   2022 * @rec: The record that failed
   2023 *
    2024 * The arch code that enables or disables function tracing
    2025 * can call ftrace_bug() when it has detected a problem in
    2026 * modifying the code. @failed should be one of:
   2027 * EFAULT - if the problem happens on reading the @ip address
   2028 * EINVAL - if what is read at @ip is not what was expected
   2029 * EPERM - if the problem happens on writing to the @ip address
   2030 */
   2031void ftrace_bug(int failed, struct dyn_ftrace *rec)
   2032{
   2033	unsigned long ip = rec ? rec->ip : 0;
   2034
   2035	pr_info("------------[ ftrace bug ]------------\n");
   2036
   2037	switch (failed) {
   2038	case -EFAULT:
   2039		pr_info("ftrace faulted on modifying ");
   2040		print_ip_sym(KERN_INFO, ip);
   2041		break;
   2042	case -EINVAL:
   2043		pr_info("ftrace failed to modify ");
   2044		print_ip_sym(KERN_INFO, ip);
   2045		print_ip_ins(" actual:   ", (unsigned char *)ip);
   2046		pr_cont("\n");
   2047		if (ftrace_expected) {
   2048			print_ip_ins(" expected: ", ftrace_expected);
   2049			pr_cont("\n");
   2050		}
   2051		break;
   2052	case -EPERM:
   2053		pr_info("ftrace faulted on writing ");
   2054		print_ip_sym(KERN_INFO, ip);
   2055		break;
   2056	default:
   2057		pr_info("ftrace faulted on unknown error ");
   2058		print_ip_sym(KERN_INFO, ip);
   2059	}
   2060	print_bug_type();
   2061	if (rec) {
   2062		struct ftrace_ops *ops = NULL;
   2063
   2064		pr_info("ftrace record flags: %lx\n", rec->flags);
   2065		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
   2066			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
   2067		if (rec->flags & FTRACE_FL_TRAMP_EN) {
   2068			ops = ftrace_find_tramp_ops_any(rec);
   2069			if (ops) {
   2070				do {
   2071					pr_cont("\ttramp: %pS (%pS)",
   2072						(void *)ops->trampoline,
   2073						(void *)ops->func);
   2074					ops = ftrace_find_tramp_ops_next(rec, ops);
   2075				} while (ops);
   2076			} else
   2077				pr_cont("\ttramp: ERROR!");
   2078
   2079		}
   2080		ip = ftrace_get_addr_curr(rec);
   2081		pr_cont("\n expected tramp: %lx\n", ip);
   2082	}
   2083
   2084	FTRACE_WARN_ON_ONCE(1);
   2085}
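
        /*
         * Editorial sketch (hypothetical; not part of the original ftrace.c)
         * of the expected calling pattern from arch text-patching code.
         * ftrace_make_call() is the real arch hook; the "example_" wrapper
         * is an assumption for illustration.
         */
        #if 0	/* illustrative only */
        static int example_arch_enable_site(struct dyn_ftrace *rec, unsigned long addr)
        {
        	int ret = ftrace_make_call(rec, addr);

        	if (ret) {
        		/* Prints the EFAULT/EINVAL/EPERM detail above, then
        		 * shuts ftrace down via FTRACE_WARN_ON_ONCE(). */
        		ftrace_bug(ret, rec);
        	}
        	return ret;
        }
        #endif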
   2086
   2087static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
   2088{
   2089	unsigned long flag = 0UL;
   2090
   2091	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
   2092
   2093	if (rec->flags & FTRACE_FL_DISABLED)
   2094		return FTRACE_UPDATE_IGNORE;
   2095
   2096	/*
   2097	 * If we are updating calls:
   2098	 *
   2099	 *   If the record has a ref count, then we need to enable it
   2100	 *   because someone is using it.
   2101	 *
    2102	 *   Otherwise we make sure it's disabled.
   2103	 *
   2104	 * If we are disabling calls, then disable all records that
   2105	 * are enabled.
   2106	 */
   2107	if (enable && ftrace_rec_count(rec))
   2108		flag = FTRACE_FL_ENABLED;
   2109
   2110	/*
   2111	 * If enabling and the REGS flag does not match the REGS_EN, or
   2112	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
   2113	 * this record. Set flags to fail the compare against ENABLED.
   2114	 * Same for direct calls.
   2115	 */
   2116	if (flag) {
   2117		if (!(rec->flags & FTRACE_FL_REGS) !=
   2118		    !(rec->flags & FTRACE_FL_REGS_EN))
   2119			flag |= FTRACE_FL_REGS;
   2120
   2121		if (!(rec->flags & FTRACE_FL_TRAMP) !=
   2122		    !(rec->flags & FTRACE_FL_TRAMP_EN))
   2123			flag |= FTRACE_FL_TRAMP;
   2124
   2125		/*
    2126		 * Direct calls are special, as the count matters.
    2127		 * We must test the record for direct if the
    2128		 * DIRECT and DIRECT_EN flags do not match, but only
    2129		 * if the count is 1. That's because, if the
    2130		 * count is anything other than one, we do not
    2131		 * want direct enabled (it will be handled via the
    2132		 * direct helper). But if DIRECT_EN is set and
    2133		 * the count is not one, we need to clear it.
   2134		 */
   2135		if (ftrace_rec_count(rec) == 1) {
   2136			if (!(rec->flags & FTRACE_FL_DIRECT) !=
   2137			    !(rec->flags & FTRACE_FL_DIRECT_EN))
   2138				flag |= FTRACE_FL_DIRECT;
   2139		} else if (rec->flags & FTRACE_FL_DIRECT_EN) {
   2140			flag |= FTRACE_FL_DIRECT;
   2141		}
   2142	}
   2143
   2144	/* If the state of this record hasn't changed, then do nothing */
   2145	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
   2146		return FTRACE_UPDATE_IGNORE;
   2147
   2148	if (flag) {
   2149		/* Save off if rec is being enabled (for return value) */
   2150		flag ^= rec->flags & FTRACE_FL_ENABLED;
   2151
   2152		if (update) {
   2153			rec->flags |= FTRACE_FL_ENABLED;
   2154			if (flag & FTRACE_FL_REGS) {
   2155				if (rec->flags & FTRACE_FL_REGS)
   2156					rec->flags |= FTRACE_FL_REGS_EN;
   2157				else
   2158					rec->flags &= ~FTRACE_FL_REGS_EN;
   2159			}
   2160			if (flag & FTRACE_FL_TRAMP) {
   2161				if (rec->flags & FTRACE_FL_TRAMP)
   2162					rec->flags |= FTRACE_FL_TRAMP_EN;
   2163				else
   2164					rec->flags &= ~FTRACE_FL_TRAMP_EN;
   2165			}
   2166
   2167			if (flag & FTRACE_FL_DIRECT) {
   2168				/*
   2169				 * If there's only one user (direct_ops helper)
   2170				 * then we can call the direct function
   2171				 * directly (no ftrace trampoline).
   2172				 */
   2173				if (ftrace_rec_count(rec) == 1) {
   2174					if (rec->flags & FTRACE_FL_DIRECT)
   2175						rec->flags |= FTRACE_FL_DIRECT_EN;
   2176					else
   2177						rec->flags &= ~FTRACE_FL_DIRECT_EN;
   2178				} else {
   2179					/*
   2180					 * Can only call directly if there's
   2181					 * only one callback to the function.
   2182					 */
   2183					rec->flags &= ~FTRACE_FL_DIRECT_EN;
   2184				}
   2185			}
   2186		}
   2187
   2188		/*
   2189		 * If this record is being updated from a nop, then
   2190		 *   return UPDATE_MAKE_CALL.
   2191		 * Otherwise,
   2192		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
    2193		 *   from a save-regs to a non-save-regs function or
   2194		 *   vice versa, or from a trampoline call.
   2195		 */
   2196		if (flag & FTRACE_FL_ENABLED) {
   2197			ftrace_bug_type = FTRACE_BUG_CALL;
   2198			return FTRACE_UPDATE_MAKE_CALL;
   2199		}
   2200
   2201		ftrace_bug_type = FTRACE_BUG_UPDATE;
   2202		return FTRACE_UPDATE_MODIFY_CALL;
   2203	}
   2204
   2205	if (update) {
    2206		/* If there are no more users, clear all flags */
   2207		if (!ftrace_rec_count(rec))
   2208			rec->flags = 0;
   2209		else
   2210			/*
   2211			 * Just disable the record, but keep the ops TRAMP
   2212			 * and REGS states. The _EN flags must be disabled though.
   2213			 */
   2214			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
   2215					FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN);
   2216	}
   2217
   2218	ftrace_bug_type = FTRACE_BUG_NOP;
   2219	return FTRACE_UPDATE_MAKE_NOP;
   2220}
   2221
   2222/**
   2223 * ftrace_update_record - set a record that now is tracing or not
   2224 * @rec: the record to update
   2225 * @enable: set to true if the record is tracing, false to force disable
   2226 *
   2227 * The records that represent all functions that can be traced need
   2228 * to be updated when tracing has been enabled.
   2229 */
   2230int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
   2231{
   2232	return ftrace_check_record(rec, enable, true);
   2233}
   2234
   2235/**
   2236 * ftrace_test_record - check if the record has been enabled or not
   2237 * @rec: the record to test
   2238 * @enable: set to true to check if enabled, false if it is disabled
   2239 *
   2240 * The arch code may need to test if a record is already set to
   2241 * tracing to determine how to modify the function code that it
   2242 * represents.
   2243 */
   2244int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
   2245{
   2246	return ftrace_check_record(rec, enable, false);
   2247}
   2248
   2249static struct ftrace_ops *
   2250ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
   2251{
   2252	struct ftrace_ops *op;
   2253	unsigned long ip = rec->ip;
   2254
   2255	do_for_each_ftrace_op(op, ftrace_ops_list) {
   2256
   2257		if (!op->trampoline)
   2258			continue;
   2259
   2260		if (hash_contains_ip(ip, op->func_hash))
   2261			return op;
   2262	} while_for_each_ftrace_op(op);
   2263
   2264	return NULL;
   2265}
   2266
   2267static struct ftrace_ops *
   2268ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
   2269{
   2270	struct ftrace_ops *op;
   2271	unsigned long ip = rec->ip;
   2272
   2273	do_for_each_ftrace_op(op, ftrace_ops_list) {
   2274
   2275		if (op == op_exclude || !op->trampoline)
   2276			continue;
   2277
   2278		if (hash_contains_ip(ip, op->func_hash))
   2279			return op;
   2280	} while_for_each_ftrace_op(op);
   2281
   2282	return NULL;
   2283}
   2284
   2285static struct ftrace_ops *
   2286ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
   2287			   struct ftrace_ops *op)
   2288{
   2289	unsigned long ip = rec->ip;
   2290
   2291	while_for_each_ftrace_op(op) {
   2292
   2293		if (!op->trampoline)
   2294			continue;
   2295
   2296		if (hash_contains_ip(ip, op->func_hash))
   2297			return op;
   2298	}
   2299
   2300	return NULL;
   2301}
   2302
   2303static struct ftrace_ops *
   2304ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
   2305{
   2306	struct ftrace_ops *op;
   2307	unsigned long ip = rec->ip;
   2308
   2309	/*
   2310	 * Need to check removed ops first.
   2311	 * If they are being removed, and this rec has a tramp,
   2312	 * and this rec is in the ops list, then it would be the
   2313	 * one with the tramp.
   2314	 */
   2315	if (removed_ops) {
   2316		if (hash_contains_ip(ip, &removed_ops->old_hash))
   2317			return removed_ops;
   2318	}
   2319
   2320	/*
   2321	 * Need to find the current trampoline for a rec.
   2322	 * Now, a trampoline is only attached to a rec if there
   2323	 * was a single 'ops' attached to it. But this can be called
   2324	 * when we are adding another op to the rec or removing the
   2325	 * current one. Thus, if the op is being added, we can
   2326	 * ignore it because it hasn't attached itself to the rec
   2327	 * yet.
   2328	 *
   2329	 * If an ops is being modified (hooking to different functions)
   2330	 * then we don't care about the new functions that are being
   2331	 * added, just the old ones (that are probably being removed).
   2332	 *
    2333	 * If we are adding an ops to a function that is already using
    2334	 * a trampoline, the trampoline needs to be removed (trampolines
    2335	 * are only for a single attached ops), so an ops that is not
    2336	 * being modified also needs to be checked.
   2337	 */
   2338	do_for_each_ftrace_op(op, ftrace_ops_list) {
   2339
   2340		if (!op->trampoline)
   2341			continue;
   2342
   2343		/*
   2344		 * If the ops is being added, it hasn't gotten to
   2345		 * the point to be removed from this tree yet.
   2346		 */
   2347		if (op->flags & FTRACE_OPS_FL_ADDING)
   2348			continue;
   2349
   2350
   2351		/*
   2352		 * If the ops is being modified and is in the old
   2353		 * hash, then it is probably being removed from this
   2354		 * function.
   2355		 */
   2356		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
   2357		    hash_contains_ip(ip, &op->old_hash))
   2358			return op;
   2359		/*
   2360		 * If the ops is not being added or modified, and it's
   2361		 * in its normal filter hash, then this must be the one
   2362		 * we want!
   2363		 */
   2364		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
   2365		    hash_contains_ip(ip, op->func_hash))
   2366			return op;
   2367
   2368	} while_for_each_ftrace_op(op);
   2369
   2370	return NULL;
   2371}
   2372
   2373static struct ftrace_ops *
   2374ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
   2375{
   2376	struct ftrace_ops *op;
   2377	unsigned long ip = rec->ip;
   2378
   2379	do_for_each_ftrace_op(op, ftrace_ops_list) {
    2380		/* return the first ops whose hash contains this rec's ip */
   2381		if (hash_contains_ip(ip, op->func_hash))
   2382			return op;
   2383	} while_for_each_ftrace_op(op);
   2384
   2385	return NULL;
   2386}
   2387
   2388#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
   2389/* Protected by rcu_tasks for reading, and direct_mutex for writing */
   2390static struct ftrace_hash *direct_functions = EMPTY_HASH;
   2391static DEFINE_MUTEX(direct_mutex);
   2392int ftrace_direct_func_count;
   2393
   2394/*
   2395 * Search the direct_functions hash to see if the given instruction pointer
   2396 * has a direct caller attached to it.
   2397 */
   2398unsigned long ftrace_find_rec_direct(unsigned long ip)
   2399{
   2400	struct ftrace_func_entry *entry;
   2401
   2402	entry = __ftrace_lookup_ip(direct_functions, ip);
   2403	if (!entry)
   2404		return 0;
   2405
   2406	return entry->direct;
   2407}
   2408
   2409static struct ftrace_func_entry*
   2410ftrace_add_rec_direct(unsigned long ip, unsigned long addr,
   2411		      struct ftrace_hash **free_hash)
   2412{
   2413	struct ftrace_func_entry *entry;
   2414
   2415	if (ftrace_hash_empty(direct_functions) ||
   2416	    direct_functions->count > 2 * (1 << direct_functions->size_bits)) {
   2417		struct ftrace_hash *new_hash;
   2418		int size = ftrace_hash_empty(direct_functions) ? 0 :
   2419			direct_functions->count + 1;
   2420
   2421		if (size < 32)
   2422			size = 32;
   2423
   2424		new_hash = dup_hash(direct_functions, size);
   2425		if (!new_hash)
   2426			return NULL;
   2427
   2428		*free_hash = direct_functions;
   2429		direct_functions = new_hash;
   2430	}
   2431
   2432	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
   2433	if (!entry)
   2434		return NULL;
   2435
   2436	entry->ip = ip;
   2437	entry->direct = addr;
   2438	__add_hash_entry(direct_functions, entry);
   2439	return entry;
   2440}
   2441
   2442static void call_direct_funcs(unsigned long ip, unsigned long pip,
   2443			      struct ftrace_ops *ops, struct ftrace_regs *fregs)
   2444{
   2445	struct pt_regs *regs = ftrace_get_regs(fregs);
   2446	unsigned long addr;
   2447
   2448	addr = ftrace_find_rec_direct(ip);
   2449	if (!addr)
   2450		return;
   2451
   2452	arch_ftrace_set_direct_caller(regs, addr);
   2453}
   2454
   2455struct ftrace_ops direct_ops = {
   2456	.func		= call_direct_funcs,
   2457	.flags		= FTRACE_OPS_FL_IPMODIFY
   2458			  | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
   2459			  | FTRACE_OPS_FL_PERMANENT,
   2460	/*
    2461	 * By declaring the main trampoline as this ops' trampoline,
    2462	 * one will never be allocated for it. Allocated
   2463	 * trampolines should not call direct functions.
   2464	 * The direct_ops should only be called by the builtin
   2465	 * ftrace_regs_caller trampoline.
   2466	 */
   2467	.trampoline	= FTRACE_REGS_ADDR,
   2468};
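
        /*
         * Editorial sketch (hypothetical; not part of the original ftrace.c)
         * of the consumer side of direct_ops/direct_functions: a module
         * attaches its own trampoline to one function with
         * register_ftrace_direct(). my_traced_func and my_tramp are assumed
         * names; my_tramp must be arch-specific assembly honouring the
         * direct-call ABI.
         */
        #if 0	/* illustrative only; the symbols below do not exist here */
        extern void my_traced_func(void);
        extern void my_tramp(void);

        static int example_attach_direct(void)
        {
        	/*
        	 * Adds ip -> my_tramp to direct_functions and hooks the ip
        	 * via direct_ops; torn down by unregister_ftrace_direct()
        	 * with the same (ip, addr) pair.
        	 */
        	return register_ftrace_direct((unsigned long)my_traced_func,
        				      (unsigned long)my_tramp);
        }
        #endif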
   2469#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
   2470
   2471/**
   2472 * ftrace_get_addr_new - Get the call address to set to
   2473 * @rec:  The ftrace record descriptor
   2474 *
    2475 * If the record has FTRACE_FL_REGS set, that means that it
   2476 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
   2477 * is not set, then it wants to convert to the normal callback.
   2478 *
    2479 * Returns the address of the trampoline to set the call site to.
   2480 */
   2481unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
   2482{
   2483	struct ftrace_ops *ops;
   2484	unsigned long addr;
   2485
   2486	if ((rec->flags & FTRACE_FL_DIRECT) &&
   2487	    (ftrace_rec_count(rec) == 1)) {
   2488		addr = ftrace_find_rec_direct(rec->ip);
   2489		if (addr)
   2490			return addr;
   2491		WARN_ON_ONCE(1);
   2492	}
   2493
   2494	/* Trampolines take precedence over regs */
   2495	if (rec->flags & FTRACE_FL_TRAMP) {
   2496		ops = ftrace_find_tramp_ops_new(rec);
   2497		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
   2498			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
   2499				(void *)rec->ip, (void *)rec->ip, rec->flags);
   2500			/* Ftrace is shutting down, return anything */
   2501			return (unsigned long)FTRACE_ADDR;
   2502		}
   2503		return ops->trampoline;
   2504	}
   2505
   2506	if (rec->flags & FTRACE_FL_REGS)
   2507		return (unsigned long)FTRACE_REGS_ADDR;
   2508	else
   2509		return (unsigned long)FTRACE_ADDR;
   2510}
   2511
   2512/**
   2513 * ftrace_get_addr_curr - Get the call address that is already there
   2514 * @rec:  The ftrace record descriptor
   2515 *
   2516 * The FTRACE_FL_REGS_EN is set when the record already points to
   2517 * a function that saves all the regs. Basically the '_EN' version
   2518 * represents the current state of the function.
   2519 *
   2520 * Returns the address of the trampoline that is currently being called
   2521 */
   2522unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
   2523{
   2524	struct ftrace_ops *ops;
   2525	unsigned long addr;
   2526
   2527	/* Direct calls take precedence over trampolines */
   2528	if (rec->flags & FTRACE_FL_DIRECT_EN) {
   2529		addr = ftrace_find_rec_direct(rec->ip);
   2530		if (addr)
   2531			return addr;
   2532		WARN_ON_ONCE(1);
   2533	}
   2534
   2535	/* Trampolines take precedence over regs */
   2536	if (rec->flags & FTRACE_FL_TRAMP_EN) {
   2537		ops = ftrace_find_tramp_ops_curr(rec);
   2538		if (FTRACE_WARN_ON(!ops)) {
   2539			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
   2540				(void *)rec->ip, (void *)rec->ip);
   2541			/* Ftrace is shutting down, return anything */
   2542			return (unsigned long)FTRACE_ADDR;
   2543		}
   2544		return ops->trampoline;
   2545	}
   2546
   2547	if (rec->flags & FTRACE_FL_REGS_EN)
   2548		return (unsigned long)FTRACE_REGS_ADDR;
   2549	else
   2550		return (unsigned long)FTRACE_ADDR;
   2551}
   2552
   2553static int
   2554__ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
   2555{
   2556	unsigned long ftrace_old_addr;
   2557	unsigned long ftrace_addr;
   2558	int ret;
   2559
   2560	ftrace_addr = ftrace_get_addr_new(rec);
   2561
   2562	/* This needs to be done before we call ftrace_update_record */
   2563	ftrace_old_addr = ftrace_get_addr_curr(rec);
   2564
   2565	ret = ftrace_update_record(rec, enable);
   2566
   2567	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
   2568
   2569	switch (ret) {
   2570	case FTRACE_UPDATE_IGNORE:
   2571		return 0;
   2572
   2573	case FTRACE_UPDATE_MAKE_CALL:
   2574		ftrace_bug_type = FTRACE_BUG_CALL;
   2575		return ftrace_make_call(rec, ftrace_addr);
   2576
   2577	case FTRACE_UPDATE_MAKE_NOP:
   2578		ftrace_bug_type = FTRACE_BUG_NOP;
   2579		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
   2580
   2581	case FTRACE_UPDATE_MODIFY_CALL:
   2582		ftrace_bug_type = FTRACE_BUG_UPDATE;
   2583		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
   2584	}
   2585
   2586	return -1; /* unknown ftrace bug */
   2587}
   2588
   2589void __weak ftrace_replace_code(int mod_flags)
   2590{
   2591	struct dyn_ftrace *rec;
   2592	struct ftrace_page *pg;
   2593	bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
   2594	int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
   2595	int failed;
   2596
   2597	if (unlikely(ftrace_disabled))
   2598		return;
   2599
   2600	do_for_each_ftrace_rec(pg, rec) {
   2601
   2602		if (rec->flags & FTRACE_FL_DISABLED)
   2603			continue;
   2604
   2605		failed = __ftrace_replace_code(rec, enable);
   2606		if (failed) {
   2607			ftrace_bug(failed, rec);
   2608			/* Stop processing */
   2609			return;
   2610		}
   2611		if (schedulable)
   2612			cond_resched();
   2613	} while_for_each_ftrace_rec();
   2614}
   2615
   2616struct ftrace_rec_iter {
   2617	struct ftrace_page	*pg;
   2618	int			index;
   2619};
   2620
   2621/**
   2622 * ftrace_rec_iter_start - start up iterating over traced functions
   2623 *
   2624 * Returns an iterator handle that is used to iterate over all
   2625 * the records that represent address locations where functions
   2626 * are traced.
   2627 *
   2628 * May return NULL if no records are available.
   2629 */
   2630struct ftrace_rec_iter *ftrace_rec_iter_start(void)
   2631{
   2632	/*
   2633	 * We only use a single iterator.
   2634	 * Protected by the ftrace_lock mutex.
   2635	 */
   2636	static struct ftrace_rec_iter ftrace_rec_iter;
   2637	struct ftrace_rec_iter *iter = &ftrace_rec_iter;
   2638
   2639	iter->pg = ftrace_pages_start;
   2640	iter->index = 0;
   2641
   2642	/* Could have empty pages */
   2643	while (iter->pg && !iter->pg->index)
   2644		iter->pg = iter->pg->next;
   2645
   2646	if (!iter->pg)
   2647		return NULL;
   2648
   2649	return iter;
   2650}
   2651
   2652/**
   2653 * ftrace_rec_iter_next - get the next record to process.
   2654 * @iter: The handle to the iterator.
   2655 *
   2656 * Returns the next iterator after the given iterator @iter.
   2657 */
   2658struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
   2659{
   2660	iter->index++;
   2661
   2662	if (iter->index >= iter->pg->index) {
   2663		iter->pg = iter->pg->next;
   2664		iter->index = 0;
   2665
   2666		/* Could have empty pages */
   2667		while (iter->pg && !iter->pg->index)
   2668			iter->pg = iter->pg->next;
   2669	}
   2670
   2671	if (!iter->pg)
   2672		return NULL;
   2673
   2674	return iter;
   2675}
   2676
   2677/**
   2678 * ftrace_rec_iter_record - get the record at the iterator location
   2679 * @iter: The current iterator location
   2680 *
   2681 * Returns the record that the current @iter is at.
   2682 */
   2683struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
   2684{
   2685	return &iter->pg->records[iter->index];
   2686}
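
        /*
         * Editorial sketch (not part of the original ftrace.c): how arch
         * code consumes the iterator trio above, modeled on the loop shape
         * of the x86 ftrace_replace_code() override. Must run with
         * ftrace_lock held, since the iterator is a single shared static.
         */
        static void __maybe_unused example_walk_rec_iter(void)
        {
        	struct ftrace_rec_iter *iter;
        	struct dyn_ftrace *rec;

        	for (iter = ftrace_rec_iter_start(); iter;
        	     iter = ftrace_rec_iter_next(iter)) {
        		rec = ftrace_rec_iter_record(iter);
        		/* query the state machine without committing changes */
        		if (ftrace_test_record(rec, true) == FTRACE_UPDATE_IGNORE)
        			continue;
        		/* arch code would patch the call site at rec->ip here */
        	}
        }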
   2687
   2688static int
   2689ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
   2690{
   2691	int ret;
   2692
   2693	if (unlikely(ftrace_disabled))
   2694		return 0;
   2695
   2696	ret = ftrace_init_nop(mod, rec);
   2697	if (ret) {
   2698		ftrace_bug_type = FTRACE_BUG_INIT;
   2699		ftrace_bug(ret, rec);
   2700		return 0;
   2701	}
   2702	return 1;
   2703}
   2704
   2705/*
   2706 * archs can override this function if they must do something
    2707 * before the code modification is performed.
   2708 */
   2709void __weak ftrace_arch_code_modify_prepare(void)
   2710{
   2711}
   2712
   2713/*
   2714 * archs can override this function if they must do something
    2715 * after the code modification is performed.
   2716 */
   2717void __weak ftrace_arch_code_modify_post_process(void)
   2718{
   2719}
   2720
   2721void ftrace_modify_all_code(int command)
   2722{
   2723	int update = command & FTRACE_UPDATE_TRACE_FUNC;
   2724	int mod_flags = 0;
   2725	int err = 0;
   2726
   2727	if (command & FTRACE_MAY_SLEEP)
   2728		mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
   2729
   2730	/*
   2731	 * If the ftrace_caller calls a ftrace_ops func directly,
   2732	 * we need to make sure that it only traces functions it
   2733	 * expects to trace. When doing the switch of functions,
   2734	 * we need to update to the ftrace_ops_list_func first
    2735	 * before the transition between the old and new calls is made,
    2736	 * as the ftrace_ops_list_func will check the ops hashes
    2737	 * to make sure the ops trace only the functions they
    2738	 * expect to trace.
   2739	 */
   2740	if (update) {
   2741		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
   2742		if (FTRACE_WARN_ON(err))
   2743			return;
   2744	}
   2745
   2746	if (command & FTRACE_UPDATE_CALLS)
   2747		ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
   2748	else if (command & FTRACE_DISABLE_CALLS)
   2749		ftrace_replace_code(mod_flags);
   2750
   2751	if (update && ftrace_trace_function != ftrace_ops_list_func) {
   2752		function_trace_op = set_function_trace_op;
   2753		smp_wmb();
   2754		/* If irqs are disabled, we are in stop machine */
   2755		if (!irqs_disabled())
   2756			smp_call_function(ftrace_sync_ipi, NULL, 1);
   2757		err = ftrace_update_ftrace_func(ftrace_trace_function);
   2758		if (FTRACE_WARN_ON(err))
   2759			return;
   2760	}
   2761
   2762	if (command & FTRACE_START_FUNC_RET)
   2763		err = ftrace_enable_ftrace_graph_caller();
   2764	else if (command & FTRACE_STOP_FUNC_RET)
   2765		err = ftrace_disable_ftrace_graph_caller();
   2766	FTRACE_WARN_ON(err);
   2767}
   2768
   2769static int __ftrace_modify_code(void *data)
   2770{
   2771	int *command = data;
   2772
   2773	ftrace_modify_all_code(*command);
   2774
   2775	return 0;
   2776}
   2777
   2778/**
   2779 * ftrace_run_stop_machine - go back to the stop machine method
   2780 * @command: The command to tell ftrace what to do
   2781 *
    2782 * If an arch needs to fall back to the stop machine method, then
    2783 * it can call this function.
   2784 */
   2785void ftrace_run_stop_machine(int command)
   2786{
   2787	stop_machine(__ftrace_modify_code, &command, NULL);
   2788}
   2789
   2790/**
   2791 * arch_ftrace_update_code - modify the code to trace or not trace
   2792 * @command: The command that needs to be done
   2793 *
    2794 * Archs can override this function if they do not need to
   2795 * run stop_machine() to modify code.
   2796 */
   2797void __weak arch_ftrace_update_code(int command)
   2798{
   2799	ftrace_run_stop_machine(command);
   2800}
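
        /*
         * Editorial sketch (hypothetical; not part of the original ftrace.c):
         * an arch that can patch text without stopping the machine overrides
         * the weak function above and calls ftrace_modify_all_code()
         * directly, as x86 does with its text_poke-based patching. Guarded
         * out because a strong definition here would clash with the __weak
         * default above.
         */
        #if 0	/* illustrative only */
        void arch_ftrace_update_code(int command)
        {
        	ftrace_modify_all_code(command);
        }
        #endif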
   2801
   2802static void ftrace_run_update_code(int command)
   2803{
   2804	ftrace_arch_code_modify_prepare();
   2805
   2806	/*
   2807	 * By default we use stop_machine() to modify the code.
    2808	 * But archs can do whatever they want as long as it
   2809	 * is safe. The stop_machine() is the safest, but also
   2810	 * produces the most overhead.
   2811	 */
   2812	arch_ftrace_update_code(command);
   2813
   2814	ftrace_arch_code_modify_post_process();
   2815}
   2816
   2817static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
   2818				   struct ftrace_ops_hash *old_hash)
   2819{
   2820	ops->flags |= FTRACE_OPS_FL_MODIFYING;
   2821	ops->old_hash.filter_hash = old_hash->filter_hash;
   2822	ops->old_hash.notrace_hash = old_hash->notrace_hash;
   2823	ftrace_run_update_code(command);
   2824	ops->old_hash.filter_hash = NULL;
   2825	ops->old_hash.notrace_hash = NULL;
   2826	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
   2827}
   2828
   2829static ftrace_func_t saved_ftrace_func;
   2830static int ftrace_start_up;
   2831
   2832void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
   2833{
   2834}
   2835
   2836/* List of trace_ops that have allocated trampolines */
   2837static LIST_HEAD(ftrace_ops_trampoline_list);
   2838
   2839static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
   2840{
   2841	lockdep_assert_held(&ftrace_lock);
   2842	list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
   2843}
   2844
   2845static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
   2846{
   2847	lockdep_assert_held(&ftrace_lock);
   2848	list_del_rcu(&ops->list);
   2849	synchronize_rcu();
   2850}
   2851
   2852/*
   2853 * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
   2854 * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
   2855 * not a module.
   2856 */
   2857#define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
   2858#define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
   2859
   2860static void ftrace_trampoline_free(struct ftrace_ops *ops)
   2861{
   2862	if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
   2863	    ops->trampoline) {
   2864		/*
   2865		 * Record the text poke event before the ksymbol unregister
   2866		 * event.
   2867		 */
   2868		perf_event_text_poke((void *)ops->trampoline,
   2869				     (void *)ops->trampoline,
   2870				     ops->trampoline_size, NULL, 0);
   2871		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
   2872				   ops->trampoline, ops->trampoline_size,
   2873				   true, FTRACE_TRAMPOLINE_SYM);
   2874		/* Remove from kallsyms after the perf events */
   2875		ftrace_remove_trampoline_from_kallsyms(ops);
   2876	}
   2877
   2878	arch_ftrace_trampoline_free(ops);
   2879}
   2880
   2881static void ftrace_startup_enable(int command)
   2882{
   2883	if (saved_ftrace_func != ftrace_trace_function) {
   2884		saved_ftrace_func = ftrace_trace_function;
   2885		command |= FTRACE_UPDATE_TRACE_FUNC;
   2886	}
   2887
   2888	if (!command || !ftrace_enabled)
   2889		return;
   2890
   2891	ftrace_run_update_code(command);
   2892}
   2893
   2894static void ftrace_startup_all(int command)
   2895{
   2896	update_all_ops = true;
   2897	ftrace_startup_enable(command);
   2898	update_all_ops = false;
   2899}
   2900
   2901int ftrace_startup(struct ftrace_ops *ops, int command)
   2902{
   2903	int ret;
   2904
   2905	if (unlikely(ftrace_disabled))
   2906		return -ENODEV;
   2907
   2908	ret = __register_ftrace_function(ops);
   2909	if (ret)
   2910		return ret;
   2911
   2912	ftrace_start_up++;
   2913
   2914	/*
    2915	 * Note that ftrace probes use this to start up
   2916	 * and modify functions it will probe. But we still
   2917	 * set the ADDING flag for modification, as probes
   2918	 * do not have trampolines. If they add them in the
   2919	 * future, then the probes will need to distinguish
   2920	 * between adding and updating probes.
   2921	 */
   2922	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
   2923
   2924	ret = ftrace_hash_ipmodify_enable(ops);
   2925	if (ret < 0) {
   2926		/* Rollback registration process */
   2927		__unregister_ftrace_function(ops);
   2928		ftrace_start_up--;
   2929		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
   2930		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
   2931			ftrace_trampoline_free(ops);
   2932		return ret;
   2933	}
   2934
   2935	if (ftrace_hash_rec_enable(ops, 1))
   2936		command |= FTRACE_UPDATE_CALLS;
   2937
   2938	ftrace_startup_enable(command);
   2939
   2940	ops->flags &= ~FTRACE_OPS_FL_ADDING;
   2941
   2942	return 0;
   2943}
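
        /*
         * Editorial sketch (hypothetical; not part of the original ftrace.c):
         * ftrace_startup() and ftrace_shutdown() below are reached through
         * the public API. A module would typically do something like this;
         * my_callback and my_ops are assumed names.
         */
        #if 0	/* illustrative only */
        static void my_callback(unsigned long ip, unsigned long parent_ip,
        			struct ftrace_ops *op, struct ftrace_regs *fregs)
        {
        	/* called for every function selected by the filter hash */
        }

        static struct ftrace_ops my_ops = {
        	.func	= my_callback,
        	/* sets FTRACE_FL_REGS on recs; needs arch SAVE_REGS support */
        	.flags	= FTRACE_OPS_FL_SAVE_REGS,
        };

        static int example_register(void)
        {
        	int ret;

        	/* Populate the filter hash, then register: this lands in
        	 * ftrace_startup() via register_ftrace_function(). */
        	ret = ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
        	if (ret)
        		return ret;
        	return register_ftrace_function(&my_ops);
        }
        #endif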
   2944
   2945int ftrace_shutdown(struct ftrace_ops *ops, int command)
   2946{
   2947	int ret;
   2948
   2949	if (unlikely(ftrace_disabled))
   2950		return -ENODEV;
   2951
   2952	ret = __unregister_ftrace_function(ops);
   2953	if (ret)
   2954		return ret;
   2955
   2956	ftrace_start_up--;
   2957	/*
    2958	 * Just warn in case of imbalance; no need to kill ftrace, it's not
    2959	 * critical, but the ftrace_call callers may never be nopped again after
    2960	 * further ftrace uses.
   2961	 */
   2962	WARN_ON_ONCE(ftrace_start_up < 0);
   2963
   2964	/* Disabling ipmodify never fails */
   2965	ftrace_hash_ipmodify_disable(ops);
   2966
   2967	if (ftrace_hash_rec_disable(ops, 1))
   2968		command |= FTRACE_UPDATE_CALLS;
   2969
   2970	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
   2971
   2972	if (saved_ftrace_func != ftrace_trace_function) {
   2973		saved_ftrace_func = ftrace_trace_function;
   2974		command |= FTRACE_UPDATE_TRACE_FUNC;
   2975	}
   2976
   2977	if (!command || !ftrace_enabled) {
   2978		/*
   2979		 * If these are dynamic or per_cpu ops, they still
    2980		 * need their data freed. Since function tracing is
   2981		 * not currently active, we can just free them
   2982		 * without synchronizing all CPUs.
   2983		 */
   2984		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
   2985			goto free_ops;
   2986
   2987		return 0;
   2988	}
   2989
   2990	/*
   2991	 * If the ops uses a trampoline, then it needs to be
   2992	 * tested first on update.
   2993	 */
   2994	ops->flags |= FTRACE_OPS_FL_REMOVING;
   2995	removed_ops = ops;
   2996
   2997	/* The trampoline logic checks the old hashes */
   2998	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
   2999	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
   3000
   3001	ftrace_run_update_code(command);
   3002
   3003	/*
    3004	 * If there are no more ops registered with ftrace, run a
   3005	 * sanity check to make sure all rec flags are cleared.
   3006	 */
   3007	if (rcu_dereference_protected(ftrace_ops_list,
   3008			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
   3009		struct ftrace_page *pg;
   3010		struct dyn_ftrace *rec;
   3011
   3012		do_for_each_ftrace_rec(pg, rec) {
   3013			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
   3014				pr_warn("  %pS flags:%lx\n",
   3015					(void *)rec->ip, rec->flags);
   3016		} while_for_each_ftrace_rec();
   3017	}
   3018
   3019	ops->old_hash.filter_hash = NULL;
   3020	ops->old_hash.notrace_hash = NULL;
   3021
   3022	removed_ops = NULL;
   3023	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
   3024
   3025	/*
   3026	 * Dynamic ops may be freed, we must make sure that all
   3027	 * callers are done before leaving this function.
   3028	 * The same goes for freeing the per_cpu data of the per_cpu
   3029	 * ops.
   3030	 */
   3031	if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
   3032		/*
   3033		 * We need to do a hard force of sched synchronization.
   3034		 * This is because we use preempt_disable() to do RCU, but
   3035		 * the function tracers can be called where RCU is not watching
    3036		 * (like before user_exit()). We cannot rely on the RCU
   3037		 * infrastructure to do the synchronization, thus we must do it
   3038		 * ourselves.
   3039		 */
   3040		synchronize_rcu_tasks_rude();
   3041
   3042		/*
    3043		 * When the kernel is preemptible, tasks can be preempted
   3044		 * while on a ftrace trampoline. Just scheduling a task on
   3045		 * a CPU is not good enough to flush them. Calling
   3046		 * synchronize_rcu_tasks() will wait for those tasks to
   3047		 * execute and either schedule voluntarily or enter user space.
   3048		 */
   3049		if (IS_ENABLED(CONFIG_PREEMPTION))
   3050			synchronize_rcu_tasks();
   3051
   3052 free_ops:
   3053		ftrace_trampoline_free(ops);
   3054	}
   3055
   3056	return 0;
   3057}
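
        /*
         * Editorial sketch (hypothetical; not part of the original ftrace.c):
         * the unregister path that reaches ftrace_shutdown() above, using
         * the assumed my_ops from the earlier registration sketch.
         */
        #if 0	/* illustrative only */
        static int example_unregister(void)
        {
        	/* Lands in ftrace_shutdown(); for a DYNAMIC ops this also
        	 * waits (synchronize_rcu_tasks*) before trampolines are freed. */
        	return unregister_ftrace_function(&my_ops);
        }
        #endif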
   3058
   3059static u64		ftrace_update_time;
   3060unsigned long		ftrace_update_tot_cnt;
   3061unsigned long		ftrace_number_of_pages;
   3062unsigned long		ftrace_number_of_groups;
   3063
   3064static inline int ops_traces_mod(struct ftrace_ops *ops)
   3065{
   3066	/*
    3067	 * An empty filter_hash defaults to tracing the whole module.
    3068	 * But the notrace hash requires a test of individual module functions.
   3069	 */
   3070	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
   3071		ftrace_hash_empty(ops->func_hash->notrace_hash);
   3072}
   3073
   3074/*
   3075 * Check if the current ops references the record.
   3076 *
   3077 * If the ops traces all functions, then it was already accounted for.
   3078 * If the ops does not trace the current record function, skip it.
   3079 * If the ops ignores the function via notrace filter, skip it.
   3080 */
   3081static inline bool
   3082ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
   3083{
   3084	/* If ops isn't enabled, ignore it */
   3085	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
   3086		return false;
   3087
   3088	/* If ops traces all then it includes this function */
   3089	if (ops_traces_mod(ops))
   3090		return true;
   3091
   3092	/* The function must be in the filter */
   3093	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
   3094	    !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
   3095		return false;
   3096
   3097	/* If in notrace hash, we ignore it too */
   3098	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
   3099		return false;
   3100
   3101	return true;
   3102}
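
        /*
         * Editorial note (hypothetical fragment; not part of the original
         * ftrace.c): the three checks above mirror the user-visible filter
         * behaviour. my_ops is an assumed name; see the sketch:
         */
        #if 0	/* illustrative only */
        	/* both hashes empty: ops_references_rec() is true for all recs */
        	ftrace_set_notrace(&my_ops, "kfree", strlen("kfree"), 0);
        	/* now true for everything except kfree */
        	ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
        	/* now true only for schedule */
        #endif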
   3103
   3104static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
   3105{
   3106	bool init_nop = ftrace_need_init_nop();
   3107	struct ftrace_page *pg;
   3108	struct dyn_ftrace *p;
   3109	u64 start, stop;
   3110	unsigned long update_cnt = 0;
   3111	unsigned long rec_flags = 0;
   3112	int i;
   3113
   3114	start = ftrace_now(raw_smp_processor_id());
   3115
   3116	/*
   3117	 * When a module is loaded, this function is called to convert
   3118	 * the calls to mcount in its text to nops, and also to create
   3119	 * an entry in the ftrace data. Now, if ftrace is activated
   3120	 * after this call, but before the module sets its text to
    3121	 * read-only, enabling ftrace can fail if the text is made
    3122	 * read-only while ftrace is still converting the calls.
   3123	 * To prevent this, the module's records are set as disabled
   3124	 * and will be enabled after the call to set the module's text
   3125	 * to read-only.
   3126	 */
   3127	if (mod)
   3128		rec_flags |= FTRACE_FL_DISABLED;
   3129
   3130	for (pg = new_pgs; pg; pg = pg->next) {
   3131
   3132		for (i = 0; i < pg->index; i++) {
   3133
   3134			/* If something went wrong, bail without enabling anything */
   3135			if (unlikely(ftrace_disabled))
   3136				return -1;
   3137
   3138			p = &pg->records[i];
   3139			p->flags = rec_flags;
   3140
   3141			/*
   3142			 * Do the initial record conversion from mcount jump
   3143			 * to the NOP instructions.
   3144			 */
   3145			if (init_nop && !ftrace_nop_initialize(mod, p))
   3146				break;
   3147
   3148			update_cnt++;
   3149		}
   3150	}
   3151
   3152	stop = ftrace_now(raw_smp_processor_id());
   3153	ftrace_update_time = stop - start;
   3154	ftrace_update_tot_cnt += update_cnt;
   3155
   3156	return 0;
   3157}
   3158
   3159static int ftrace_allocate_records(struct ftrace_page *pg, int count)
   3160{
   3161	int order;
   3162	int pages;
   3163	int cnt;
   3164
   3165	if (WARN_ON(!count))
   3166		return -EINVAL;
   3167
   3168	/* We want to fill as much as possible, with no empty pages */
   3169	pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
   3170	order = fls(pages) - 1;
   3171
   3172 again:
   3173	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
   3174
   3175	if (!pg->records) {
   3176		/* if we can't allocate this size, try something smaller */
   3177		if (!order)
   3178			return -ENOMEM;
   3179		order >>= 1;
   3180		goto again;
   3181	}
   3182
   3183	ftrace_number_of_pages += 1 << order;
   3184	ftrace_number_of_groups++;
   3185
   3186	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
   3187	pg->order = order;
   3188
   3189	if (cnt > count)
   3190		cnt = count;
   3191
   3192	return cnt;
   3193}
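
        /*
         * Editorial worked example for the sizing logic above (not part of
         * the original ftrace.c): if count needs pages = 5, then
         * order = fls(5) - 1 = 2, so one block of 1 << 2 = 4 pages is
         * requested; cnt then holds (PAGE_SIZE << 2) / ENTRY_SIZE records
         * (capped at count), and ftrace_allocate_pages() below loops to
         * cover any remainder. On allocation failure the order is halved
         * (order >>= 1) until a block fits or order 0 itself fails
         * with -ENOMEM.
         */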
   3194
   3195static struct ftrace_page *
   3196ftrace_allocate_pages(unsigned long num_to_init)
   3197{
   3198	struct ftrace_page *start_pg;
   3199	struct ftrace_page *pg;
   3200	int cnt;
   3201
   3202	if (!num_to_init)
   3203		return NULL;
   3204
   3205	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
   3206	if (!pg)
   3207		return NULL;
   3208
   3209	/*
    3210	 * Try to allocate as much as possible in one contiguous
   3211	 * location that fills in all of the space. We want to
   3212	 * waste as little space as possible.
   3213	 */
   3214	for (;;) {
   3215		cnt = ftrace_allocate_records(pg, num_to_init);
   3216		if (cnt < 0)
   3217			goto free_pages;
   3218
   3219		num_to_init -= cnt;
   3220		if (!num_to_init)
   3221			break;
   3222
   3223		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
   3224		if (!pg->next)
   3225			goto free_pages;
   3226
   3227		pg = pg->next;
   3228	}
   3229
   3230	return start_pg;
   3231
   3232 free_pages:
   3233	pg = start_pg;
   3234	while (pg) {
   3235		if (pg->records) {
   3236			free_pages((unsigned long)pg->records, pg->order);
   3237			ftrace_number_of_pages -= 1 << pg->order;
   3238		}
   3239		start_pg = pg->next;
   3240		kfree(pg);
   3241		pg = start_pg;
   3242		ftrace_number_of_groups--;
   3243	}
   3244	pr_info("ftrace: FAILED to allocate memory for functions\n");
   3245	return NULL;
   3246}
   3247
   3248#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
   3249
   3250struct ftrace_iterator {
   3251	loff_t				pos;
   3252	loff_t				func_pos;
   3253	loff_t				mod_pos;
   3254	struct ftrace_page		*pg;
   3255	struct dyn_ftrace		*func;
   3256	struct ftrace_func_probe	*probe;
   3257	struct ftrace_func_entry	*probe_entry;
   3258	struct trace_parser		parser;
   3259	struct ftrace_hash		*hash;
   3260	struct ftrace_ops		*ops;
   3261	struct trace_array		*tr;
   3262	struct list_head		*mod_list;
   3263	int				pidx;
   3264	int				idx;
   3265	unsigned			flags;
   3266};
   3267
   3268static void *
   3269t_probe_next(struct seq_file *m, loff_t *pos)
   3270{
   3271	struct ftrace_iterator *iter = m->private;
   3272	struct trace_array *tr = iter->ops->private;
   3273	struct list_head *func_probes;
   3274	struct ftrace_hash *hash;
   3275	struct list_head *next;
   3276	struct hlist_node *hnd = NULL;
   3277	struct hlist_head *hhd;
   3278	int size;
   3279
   3280	(*pos)++;
   3281	iter->pos = *pos;
   3282
   3283	if (!tr)
   3284		return NULL;
   3285
   3286	func_probes = &tr->func_probes;
   3287	if (list_empty(func_probes))
   3288		return NULL;
   3289
   3290	if (!iter->probe) {
   3291		next = func_probes->next;
   3292		iter->probe = list_entry(next, struct ftrace_func_probe, list);
   3293	}
   3294
   3295	if (iter->probe_entry)
   3296		hnd = &iter->probe_entry->hlist;
   3297
   3298	hash = iter->probe->ops.func_hash->filter_hash;
   3299
   3300	/*
   3301	 * A probe being registered may temporarily have an empty hash
   3302	 * and it's at the end of the func_probes list.
   3303	 */
   3304	if (!hash || hash == EMPTY_HASH)
   3305		return NULL;
   3306
   3307	size = 1 << hash->size_bits;
   3308
   3309 retry:
   3310	if (iter->pidx >= size) {
   3311		if (iter->probe->list.next == func_probes)
   3312			return NULL;
   3313		next = iter->probe->list.next;
   3314		iter->probe = list_entry(next, struct ftrace_func_probe, list);
   3315		hash = iter->probe->ops.func_hash->filter_hash;
   3316		size = 1 << hash->size_bits;
   3317		iter->pidx = 0;
   3318	}
   3319
   3320	hhd = &hash->buckets[iter->pidx];
   3321
   3322	if (hlist_empty(hhd)) {
   3323		iter->pidx++;
   3324		hnd = NULL;
   3325		goto retry;
   3326	}
   3327
   3328	if (!hnd)
   3329		hnd = hhd->first;
   3330	else {
   3331		hnd = hnd->next;
   3332		if (!hnd) {
   3333			iter->pidx++;
   3334			goto retry;
   3335		}
   3336	}
   3337
   3338	if (WARN_ON_ONCE(!hnd))
   3339		return NULL;
   3340
   3341	iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
   3342
   3343	return iter;
   3344}
   3345
   3346static void *t_probe_start(struct seq_file *m, loff_t *pos)
   3347{
   3348	struct ftrace_iterator *iter = m->private;
   3349	void *p = NULL;
   3350	loff_t l;
   3351
   3352	if (!(iter->flags & FTRACE_ITER_DO_PROBES))
   3353		return NULL;
   3354
   3355	if (iter->mod_pos > *pos)
   3356		return NULL;
   3357
   3358	iter->probe = NULL;
   3359	iter->probe_entry = NULL;
   3360	iter->pidx = 0;
   3361	for (l = 0; l <= (*pos - iter->mod_pos); ) {
   3362		p = t_probe_next(m, &l);
   3363		if (!p)
   3364			break;
   3365	}
   3366	if (!p)
   3367		return NULL;
   3368
   3369	/* Only set this if we have an item */
   3370	iter->flags |= FTRACE_ITER_PROBE;
   3371
   3372	return iter;
   3373}
   3374
   3375static int
   3376t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
   3377{
   3378	struct ftrace_func_entry *probe_entry;
   3379	struct ftrace_probe_ops *probe_ops;
   3380	struct ftrace_func_probe *probe;
   3381
   3382	probe = iter->probe;
   3383	probe_entry = iter->probe_entry;
   3384
   3385	if (WARN_ON_ONCE(!probe || !probe_entry))
   3386		return -EIO;
   3387
   3388	probe_ops = probe->probe_ops;
   3389
   3390	if (probe_ops->print)
   3391		return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
   3392
   3393	seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
   3394		   (void *)probe_ops->func);
   3395
   3396	return 0;
   3397}
   3398
   3399static void *
   3400t_mod_next(struct seq_file *m, loff_t *pos)
   3401{
   3402	struct ftrace_iterator *iter = m->private;
   3403	struct trace_array *tr = iter->tr;
   3404
   3405	(*pos)++;
   3406	iter->pos = *pos;
   3407
   3408	iter->mod_list = iter->mod_list->next;
   3409
   3410	if (iter->mod_list == &tr->mod_trace ||
   3411	    iter->mod_list == &tr->mod_notrace) {
   3412		iter->flags &= ~FTRACE_ITER_MOD;
   3413		return NULL;
   3414	}
   3415
   3416	iter->mod_pos = *pos;
   3417
   3418	return iter;
   3419}
   3420
   3421static void *t_mod_start(struct seq_file *m, loff_t *pos)
   3422{
   3423	struct ftrace_iterator *iter = m->private;
   3424	void *p = NULL;
   3425	loff_t l;
   3426
   3427	if (iter->func_pos > *pos)
   3428		return NULL;
   3429
   3430	iter->mod_pos = iter->func_pos;
   3431
   3432	/* probes are only available if tr is set */
   3433	if (!iter->tr)
   3434		return NULL;
   3435
   3436	for (l = 0; l <= (*pos - iter->func_pos); ) {
   3437		p = t_mod_next(m, &l);
   3438		if (!p)
   3439			break;
   3440	}
   3441	if (!p) {
   3442		iter->flags &= ~FTRACE_ITER_MOD;
   3443		return t_probe_start(m, pos);
   3444	}
   3445
   3446	/* Only set this if we have an item */
   3447	iter->flags |= FTRACE_ITER_MOD;
   3448
   3449	return iter;
   3450}
   3451
   3452static int
   3453t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
   3454{
   3455	struct ftrace_mod_load *ftrace_mod;
   3456	struct trace_array *tr = iter->tr;
   3457
   3458	if (WARN_ON_ONCE(!iter->mod_list) ||
   3459			 iter->mod_list == &tr->mod_trace ||
   3460			 iter->mod_list == &tr->mod_notrace)
   3461		return -EIO;
   3462
   3463	ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
   3464
   3465	if (ftrace_mod->func)
   3466		seq_printf(m, "%s", ftrace_mod->func);
   3467	else
   3468		seq_putc(m, '*');
   3469
   3470	seq_printf(m, ":mod:%s\n", ftrace_mod->module);
   3471
   3472	return 0;
   3473}
   3474
   3475static void *
   3476t_func_next(struct seq_file *m, loff_t *pos)
   3477{
   3478	struct ftrace_iterator *iter = m->private;
   3479	struct dyn_ftrace *rec = NULL;
   3480
   3481	(*pos)++;
   3482
   3483 retry:
   3484	if (iter->idx >= iter->pg->index) {
   3485		if (iter->pg->next) {
   3486			iter->pg = iter->pg->next;
   3487			iter->idx = 0;
   3488			goto retry;
   3489		}
   3490	} else {
   3491		rec = &iter->pg->records[iter->idx++];
   3492		if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
   3493		     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
   3494
   3495		    ((iter->flags & FTRACE_ITER_ENABLED) &&
   3496		     !(rec->flags & FTRACE_FL_ENABLED))) {
   3497
   3498			rec = NULL;
   3499			goto retry;
   3500		}
   3501	}
   3502
   3503	if (!rec)
   3504		return NULL;
   3505
   3506	iter->pos = iter->func_pos = *pos;
   3507	iter->func = rec;
   3508
   3509	return iter;
   3510}
   3511
   3512static void *
   3513t_next(struct seq_file *m, void *v, loff_t *pos)
   3514{
   3515	struct ftrace_iterator *iter = m->private;
   3516	loff_t l = *pos; /* t_probe_start() must use original pos */
   3517	void *ret;
   3518
   3519	if (unlikely(ftrace_disabled))
   3520		return NULL;
   3521
   3522	if (iter->flags & FTRACE_ITER_PROBE)
   3523		return t_probe_next(m, pos);
   3524
   3525	if (iter->flags & FTRACE_ITER_MOD)
   3526		return t_mod_next(m, pos);
   3527
   3528	if (iter->flags & FTRACE_ITER_PRINTALL) {
   3529		/* next must increment pos, and t_probe_start does not */
   3530		(*pos)++;
   3531		return t_mod_start(m, &l);
   3532	}
   3533
   3534	ret = t_func_next(m, pos);
   3535
   3536	if (!ret)
   3537		return t_mod_start(m, &l);
   3538
   3539	return ret;
   3540}
   3541
   3542static void reset_iter_read(struct ftrace_iterator *iter)
   3543{
   3544	iter->pos = 0;
   3545	iter->func_pos = 0;
   3546	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
   3547}
   3548
   3549static void *t_start(struct seq_file *m, loff_t *pos)
   3550{
   3551	struct ftrace_iterator *iter = m->private;
   3552	void *p = NULL;
   3553	loff_t l;
   3554
   3555	mutex_lock(&ftrace_lock);
   3556
   3557	if (unlikely(ftrace_disabled))
   3558		return NULL;
   3559
   3560	/*
    3561	 * If an lseek was done, then reset and start from the beginning.
   3562	 */
   3563	if (*pos < iter->pos)
   3564		reset_iter_read(iter);
   3565
   3566	/*
   3567	 * For set_ftrace_filter reading, if we have the filter
    3568	 * off, we can take a shortcut and just print out that all
   3569	 * functions are enabled.
   3570	 */
   3571	if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
   3572	    ftrace_hash_empty(iter->hash)) {
   3573		iter->func_pos = 1; /* Account for the message */
   3574		if (*pos > 0)
   3575			return t_mod_start(m, pos);
   3576		iter->flags |= FTRACE_ITER_PRINTALL;
   3577		/* reset in case of seek/pread */
   3578		iter->flags &= ~FTRACE_ITER_PROBE;
   3579		return iter;
   3580	}
   3581
   3582	if (iter->flags & FTRACE_ITER_MOD)
   3583		return t_mod_start(m, pos);
   3584
   3585	/*
   3586	 * Unfortunately, we need to restart at ftrace_pages_start
    3587	 * every time we let go of the ftrace_lock mutex. This is because
   3588	 * those pointers can change without the lock.
   3589	 */
   3590	iter->pg = ftrace_pages_start;
   3591	iter->idx = 0;
   3592	for (l = 0; l <= *pos; ) {
   3593		p = t_func_next(m, &l);
   3594		if (!p)
   3595			break;
   3596	}
   3597
   3598	if (!p)
   3599		return t_mod_start(m, pos);
   3600
   3601	return iter;
   3602}
   3603
   3604static void t_stop(struct seq_file *m, void *p)
   3605{
   3606	mutex_unlock(&ftrace_lock);
   3607}
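
        /*
         * Editorial sketch (not part of this span of ftrace.c): the
         * t_start/t_next/t_stop callbacks above and t_show below plug into
         * the seq_file API in the usual way; ftrace.c wires them up further
         * down in a table equivalent to this one.
         */
        #if 0	/* illustrative only; the real table lives later in the file */
        static const struct seq_operations show_ftrace_seq_ops = {
        	.start	= t_start,
        	.next	= t_next,
        	.stop	= t_stop,
        	.show	= t_show,
        };
        #endif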
   3608
   3609void * __weak
   3610arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
   3611{
   3612	return NULL;
   3613}
   3614
   3615static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
   3616				struct dyn_ftrace *rec)
   3617{
   3618	void *ptr;
   3619
   3620	ptr = arch_ftrace_trampoline_func(ops, rec);
   3621	if (ptr)
   3622		seq_printf(m, " ->%pS", ptr);
   3623}
   3624
   3625#ifdef FTRACE_MCOUNT_MAX_OFFSET
   3626/*
   3627 * Weak functions can still have an mcount/fentry that is saved in
   3628 * the __mcount_loc section. These can be detected by having a
    3629 * symbol offset greater than FTRACE_MCOUNT_MAX_OFFSET, as the
   3630 * symbol found by kallsyms is not the function that the mcount/fentry
   3631 * is part of. The offset is much greater in these cases.
   3632 *
   3633 * Test the record to make sure that the ip points to a valid kallsyms
   3634 * and if not, mark it disabled.
   3635 */
   3636static int test_for_valid_rec(struct dyn_ftrace *rec)
   3637{
   3638	char str[KSYM_SYMBOL_LEN];
   3639	unsigned long offset;
   3640	const char *ret;
   3641
   3642	ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);
   3643
   3644	/* Weak functions can cause invalid addresses */
   3645	if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
   3646		rec->flags |= FTRACE_FL_DISABLED;
   3647		return 0;
   3648	}
   3649	return 1;
   3650}
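
        /*
         * Editorial worked example (not part of the original ftrace.c): if a
         * weak function was overridden at link time, its stale __mcount_loc
         * entry now resolves via kallsyms to whatever strong symbol happens
         * to precede that address, with an offset far beyond any function
         * prologue. Any offset above FTRACE_MCOUNT_MAX_OFFSET therefore
         * marks the rec FTRACE_FL_DISABLED in test_for_valid_rec() above.
         */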
   3651
   3652static struct workqueue_struct *ftrace_check_wq __initdata;
   3653static struct work_struct ftrace_check_work __initdata;
   3654
   3655/*
   3656 * Scan all the mcount/fentry entries to make sure they are valid.
   3657 */
   3658static __init void ftrace_check_work_func(struct work_struct *work)
   3659{
   3660	struct ftrace_page *pg;
   3661	struct dyn_ftrace *rec;
   3662
   3663	mutex_lock(&ftrace_lock);
   3664	do_for_each_ftrace_rec(pg, rec) {
   3665		test_for_valid_rec(rec);
   3666	} while_for_each_ftrace_rec();
   3667	mutex_unlock(&ftrace_lock);
   3668}
   3669
   3670static int __init ftrace_check_for_weak_functions(void)
   3671{
   3672	INIT_WORK(&ftrace_check_work, ftrace_check_work_func);
   3673
   3674	ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);
   3675
   3676	queue_work(ftrace_check_wq, &ftrace_check_work);
   3677	return 0;
   3678}
   3679
   3680static int __init ftrace_check_sync(void)
   3681{
   3682	/* Make sure the ftrace_check updates are finished */
   3683	if (ftrace_check_wq)
   3684		destroy_workqueue(ftrace_check_wq);
   3685	return 0;
   3686}
   3687
   3688late_initcall_sync(ftrace_check_sync);
   3689subsys_initcall(ftrace_check_for_weak_functions);
   3690
   3691static int print_rec(struct seq_file *m, unsigned long ip)
   3692{
   3693	unsigned long offset;
   3694	char str[KSYM_SYMBOL_LEN];
   3695	char *modname;
   3696	const char *ret;
   3697
   3698	ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
   3699	/* Weak functions can cause invalid addresses */
   3700	if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
   3701		snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
   3702			 FTRACE_INVALID_FUNCTION, offset);
   3703		ret = NULL;
   3704	}
   3705
   3706	seq_puts(m, str);
   3707	if (modname)
   3708		seq_printf(m, " [%s]", modname);
   3709	return ret == NULL ? -1 : 0;
   3710}
   3711#else
   3712static inline int test_for_valid_rec(struct dyn_ftrace *rec)
   3713{
   3714	return 1;
   3715}
   3716
   3717static inline int print_rec(struct seq_file *m, unsigned long ip)
   3718{
   3719	seq_printf(m, "%ps", (void *)ip);
   3720	return 0;
   3721}
   3722#endif
   3723
   3724static int t_show(struct seq_file *m, void *v)
   3725{
   3726	struct ftrace_iterator *iter = m->private;
   3727	struct dyn_ftrace *rec;
   3728
   3729	if (iter->flags & FTRACE_ITER_PROBE)
   3730		return t_probe_show(m, iter);
   3731
   3732	if (iter->flags & FTRACE_ITER_MOD)
   3733		return t_mod_show(m, iter);
   3734
   3735	if (iter->flags & FTRACE_ITER_PRINTALL) {
   3736		if (iter->flags & FTRACE_ITER_NOTRACE)
   3737			seq_puts(m, "#### no functions disabled ####\n");
   3738		else
   3739			seq_puts(m, "#### all functions enabled ####\n");
   3740		return 0;
   3741	}
   3742
   3743	rec = iter->func;
   3744
   3745	if (!rec)
   3746		return 0;
   3747
   3748	if (print_rec(m, rec->ip)) {
   3749		/* This should only happen when a rec is disabled */
   3750		WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
   3751		seq_putc(m, '\n');
   3752		return 0;
   3753	}
   3754
   3755	if (iter->flags & FTRACE_ITER_ENABLED) {
   3756		struct ftrace_ops *ops;
   3757
   3758		seq_printf(m, " (%ld)%s%s%s",
   3759			   ftrace_rec_count(rec),
   3760			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
   3761			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ",
   3762			   rec->flags & FTRACE_FL_DIRECT ? " D" : "  ");
   3763		if (rec->flags & FTRACE_FL_TRAMP_EN) {
   3764			ops = ftrace_find_tramp_ops_any(rec);
   3765			if (ops) {
   3766				do {
   3767					seq_printf(m, "\ttramp: %pS (%pS)",
   3768						   (void *)ops->trampoline,
   3769						   (void *)ops->func);
   3770					add_trampoline_func(m, ops, rec);
   3771					ops = ftrace_find_tramp_ops_next(rec, ops);
   3772				} while (ops);
   3773			} else
   3774				seq_puts(m, "\ttramp: ERROR!");
   3775		} else {
   3776			add_trampoline_func(m, NULL, rec);
   3777		}
   3778		if (rec->flags & FTRACE_FL_DIRECT) {
   3779			unsigned long direct;
   3780
   3781			direct = ftrace_find_rec_direct(rec->ip);
   3782			if (direct)
   3783				seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
   3784		}
   3785	}
   3786
   3787	seq_putc(m, '\n');
   3788
   3789	return 0;
   3790}
   3791
   3792static const struct seq_operations show_ftrace_seq_ops = {
   3793	.start = t_start,
   3794	.next = t_next,
   3795	.stop = t_stop,
   3796	.show = t_show,
   3797};
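
        /*
         * Editor's note (hedged summary): the seq_file core drives these
         * callbacks as start(pos), then show()/next() per record, then
         * stop(). Since t_stop() releases ftrace_lock, each new read chunk
         * re-enters t_start(), which is why t_start() re-walks from
         * ftrace_pages_start up to the saved position.
         */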
   3798
   3799static int
   3800ftrace_avail_open(struct inode *inode, struct file *file)
   3801{
   3802	struct ftrace_iterator *iter;
   3803	int ret;
   3804
   3805	ret = security_locked_down(LOCKDOWN_TRACEFS);
   3806	if (ret)
   3807		return ret;
   3808
   3809	if (unlikely(ftrace_disabled))
   3810		return -ENODEV;
   3811
   3812	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
   3813	if (!iter)
   3814		return -ENOMEM;
   3815
   3816	iter->pg = ftrace_pages_start;
   3817	iter->ops = &global_ops;
   3818
   3819	return 0;
   3820}
   3821
   3822static int
   3823ftrace_enabled_open(struct inode *inode, struct file *file)
   3824{
   3825	struct ftrace_iterator *iter;
   3826
   3827	/*
   3828	 * This shows us what functions are currently being
   3829	 * traced and by what. Not sure if we want lockdown
    3830	 * to hide such critical information from an admin.
    3831	 * It may reveal information we don't want people
    3832	 * to see, but if something is tracing something,
    3833	 * we probably want to know about it.
   3834	 */
   3835
   3836	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
   3837	if (!iter)
   3838		return -ENOMEM;
   3839
   3840	iter->pg = ftrace_pages_start;
   3841	iter->flags = FTRACE_ITER_ENABLED;
   3842	iter->ops = &global_ops;
   3843
   3844	return 0;
   3845}
   3846
   3847/**
   3848 * ftrace_regex_open - initialize function tracer filter files
   3849 * @ops: The ftrace_ops that hold the hash filters
   3850 * @flag: The type of filter to process
   3851 * @inode: The inode, usually passed in to your open routine
   3852 * @file: The file, usually passed in to your open routine
   3853 *
   3854 * ftrace_regex_open() initializes the filter files for the
   3855 * @ops. Depending on @flag it may process the filter hash or
    3856 * the notrace hash of @ops. When this is called from the open
   3857 * routine, you can use ftrace_filter_write() for the write
   3858 * routine if @flag has FTRACE_ITER_FILTER set, or
   3859 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
   3860 * tracing_lseek() should be used as the lseek routine, and
   3861 * release must call ftrace_regex_release().
   3862 */
   3863int
   3864ftrace_regex_open(struct ftrace_ops *ops, int flag,
   3865		  struct inode *inode, struct file *file)
   3866{
   3867	struct ftrace_iterator *iter;
   3868	struct ftrace_hash *hash;
   3869	struct list_head *mod_head;
   3870	struct trace_array *tr = ops->private;
   3871	int ret = -ENOMEM;
   3872
   3873	ftrace_ops_init(ops);
   3874
   3875	if (unlikely(ftrace_disabled))
   3876		return -ENODEV;
   3877
   3878	if (tracing_check_open_get_tr(tr))
   3879		return -ENODEV;
   3880
   3881	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
   3882	if (!iter)
   3883		goto out;
   3884
   3885	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
   3886		goto out;
   3887
   3888	iter->ops = ops;
   3889	iter->flags = flag;
   3890	iter->tr = tr;
   3891
   3892	mutex_lock(&ops->func_hash->regex_lock);
   3893
   3894	if (flag & FTRACE_ITER_NOTRACE) {
   3895		hash = ops->func_hash->notrace_hash;
   3896		mod_head = tr ? &tr->mod_notrace : NULL;
   3897	} else {
   3898		hash = ops->func_hash->filter_hash;
   3899		mod_head = tr ? &tr->mod_trace : NULL;
   3900	}
   3901
   3902	iter->mod_list = mod_head;
   3903
   3904	if (file->f_mode & FMODE_WRITE) {
   3905		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
   3906
   3907		if (file->f_flags & O_TRUNC) {
   3908			iter->hash = alloc_ftrace_hash(size_bits);
   3909			clear_ftrace_mod_list(mod_head);
    3910		} else {
   3911			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
   3912		}
   3913
   3914		if (!iter->hash) {
   3915			trace_parser_put(&iter->parser);
   3916			goto out_unlock;
   3917		}
   3918	} else
   3919		iter->hash = hash;
   3920
   3921	ret = 0;
   3922
   3923	if (file->f_mode & FMODE_READ) {
   3924		iter->pg = ftrace_pages_start;
   3925
   3926		ret = seq_open(file, &show_ftrace_seq_ops);
   3927		if (!ret) {
   3928			struct seq_file *m = file->private_data;
   3929			m->private = iter;
   3930		} else {
   3931			/* Failed */
   3932			free_ftrace_hash(iter->hash);
   3933			trace_parser_put(&iter->parser);
   3934		}
   3935	} else
   3936		file->private_data = iter;
   3937
   3938 out_unlock:
   3939	mutex_unlock(&ops->func_hash->regex_lock);
   3940
   3941 out:
   3942	if (ret) {
   3943		kfree(iter);
   3944		if (tr)
   3945			trace_array_put(tr);
   3946	}
   3947
   3948	return ret;
   3949}
   3950
   3951static int
   3952ftrace_filter_open(struct inode *inode, struct file *file)
   3953{
   3954	struct ftrace_ops *ops = inode->i_private;
   3955
   3956	/* Checks for tracefs lockdown */
   3957	return ftrace_regex_open(ops,
   3958			FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
   3959			inode, file);
   3960}
   3961
   3962static int
   3963ftrace_notrace_open(struct inode *inode, struct file *file)
   3964{
   3965	struct ftrace_ops *ops = inode->i_private;
   3966
   3967	/* Checks for tracefs lockdown */
   3968	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
   3969				 inode, file);
   3970}
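
        /*
         * Editor's sketch (hedged, not part of the kernel source): how the
         * helpers named in the ftrace_regex_open() kernel-doc above are
         * typically wired into a tracefs file. "my_filter_fops" is
         * hypothetical; the callbacks are the ones this file provides.
         */
        #if 0
        static const struct file_operations my_filter_fops = {
        	.open    = ftrace_filter_open,	/* calls ftrace_regex_open() */
        	.read    = seq_read,
        	.write   = ftrace_filter_write,	/* FTRACE_ITER_FILTER writes */
        	.llseek  = tracing_lseek,
        	.release = ftrace_regex_release,
        };
        #endif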
   3971
    3972/* Type used for quick searching of ftrace basic regexes (globs) from filter_parse_regex */
   3973struct ftrace_glob {
   3974	char *search;
   3975	unsigned len;
   3976	int type;
   3977};
   3978
   3979/*
   3980 * If symbols in an architecture don't correspond exactly to the user-visible
   3981 * name of what they represent, it is possible to define this function to
   3982 * perform the necessary adjustments.
    3983 */
   3984char * __weak arch_ftrace_match_adjust(char *str, const char *search)
   3985{
   3986	return str;
   3987}
   3988
   3989static int ftrace_match(char *str, struct ftrace_glob *g)
   3990{
   3991	int matched = 0;
   3992	int slen;
   3993
   3994	str = arch_ftrace_match_adjust(str, g->search);
   3995
   3996	switch (g->type) {
   3997	case MATCH_FULL:
   3998		if (strcmp(str, g->search) == 0)
   3999			matched = 1;
   4000		break;
   4001	case MATCH_FRONT_ONLY:
   4002		if (strncmp(str, g->search, g->len) == 0)
   4003			matched = 1;
   4004		break;
   4005	case MATCH_MIDDLE_ONLY:
   4006		if (strstr(str, g->search))
   4007			matched = 1;
   4008		break;
   4009	case MATCH_END_ONLY:
   4010		slen = strlen(str);
   4011		if (slen >= g->len &&
   4012		    memcmp(str + slen - g->len, g->search, g->len) == 0)
   4013			matched = 1;
   4014		break;
   4015	case MATCH_GLOB:
   4016		if (glob_match(g->search, str))
   4017			matched = 1;
   4018		break;
   4019	}
   4020
   4021	return matched;
   4022}
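
        /*
         * Editor's sketch (hedged, not part of the kernel source): matching
         * the glob "*_lock". filter_parse_regex() classifies it as
         * MATCH_END_ONLY with search "_lock", so ftrace_match() compares the
         * tail of the symbol name. The helper below is hypothetical.
         */
        #if 0
        static bool example_is_lock_func(char *name)
        {
        	struct ftrace_glob g = {
        		.search = "_lock",	/* "*_lock" with the '*' stripped */
        		.len    = 5,
        		.type   = MATCH_END_ONLY,
        	};

        	return ftrace_match(name, &g) != 0;
        }
        #endif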
   4023
   4024static int
   4025enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
   4026{
   4027	struct ftrace_func_entry *entry;
   4028	int ret = 0;
   4029
   4030	entry = ftrace_lookup_ip(hash, rec->ip);
   4031	if (clear_filter) {
   4032		/* Do nothing if it doesn't exist */
   4033		if (!entry)
   4034			return 0;
   4035
   4036		free_hash_entry(hash, entry);
   4037	} else {
   4038		/* Do nothing if it exists */
   4039		if (entry)
   4040			return 0;
   4041
   4042		ret = add_hash_entry(hash, rec->ip);
   4043	}
   4044	return ret;
   4045}
   4046
   4047static int
   4048add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
   4049		 int clear_filter)
   4050{
   4051	long index = simple_strtoul(func_g->search, NULL, 0);
   4052	struct ftrace_page *pg;
   4053	struct dyn_ftrace *rec;
   4054
   4055	/* The index starts at 1 */
   4056	if (--index < 0)
   4057		return 0;
   4058
   4059	do_for_each_ftrace_rec(pg, rec) {
   4060		if (pg->index <= index) {
   4061			index -= pg->index;
   4062			/* this is a double loop, break goes to the next page */
   4063			break;
   4064		}
   4065		rec = &pg->records[index];
   4066		enter_record(hash, rec, clear_filter);
   4067		return 1;
   4068	} while_for_each_ftrace_rec();
   4069	return 0;
   4070}
   4071
   4072#ifdef FTRACE_MCOUNT_MAX_OFFSET
   4073static int lookup_ip(unsigned long ip, char **modname, char *str)
   4074{
   4075	unsigned long offset;
   4076
   4077	kallsyms_lookup(ip, NULL, &offset, modname, str);
   4078	if (offset > FTRACE_MCOUNT_MAX_OFFSET)
   4079		return -1;
   4080	return 0;
   4081}
   4082#else
   4083static int lookup_ip(unsigned long ip, char **modname, char *str)
   4084{
   4085	kallsyms_lookup(ip, NULL, NULL, modname, str);
   4086	return 0;
   4087}
   4088#endif
   4089
   4090static int
   4091ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
   4092		struct ftrace_glob *mod_g, int exclude_mod)
   4093{
   4094	char str[KSYM_SYMBOL_LEN];
   4095	char *modname;
   4096
   4097	if (lookup_ip(rec->ip, &modname, str)) {
   4098		/* This should only happen when a rec is disabled */
   4099		WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
   4100			     !(rec->flags & FTRACE_FL_DISABLED));
   4101		return 0;
   4102	}
   4103
   4104	if (mod_g) {
   4105		int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
   4106
   4107		/* blank module name to match all modules */
   4108		if (!mod_g->len) {
   4109			/* blank module globbing: modname xor exclude_mod */
   4110			if (!exclude_mod != !modname)
   4111				goto func_match;
   4112			return 0;
   4113		}
   4114
   4115		/*
   4116		 * exclude_mod is set to trace everything but the given
   4117		 * module. If it is set and the module matches, then
   4118		 * return 0. If it is not set, and the module doesn't match
   4119		 * also return 0. Otherwise, check the function to see if
   4120		 * that matches.
   4121		 */
   4122		if (!mod_matches == !exclude_mod)
   4123			return 0;
   4124func_match:
   4125		/* blank search means to match all funcs in the mod */
   4126		if (!func_g->len)
   4127			return 1;
   4128	}
   4129
   4130	return ftrace_match(str, func_g);
   4131}
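
        /*
         * Editor's note (hedged): in the mod_g handling above, an empty
         * module glob selects on module presence alone -- "mod:" with
         * exclude_mod clear keeps only module functions, and with
         * exclude_mod set keeps only built-in functions. A non-empty module
         * glob must match the module name (or must not, when exclude_mod is
         * set) before the function glob is consulted at all.
         */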
   4132
   4133static int
   4134match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
   4135{
   4136	struct ftrace_page *pg;
   4137	struct dyn_ftrace *rec;
   4138	struct ftrace_glob func_g = { .type = MATCH_FULL };
   4139	struct ftrace_glob mod_g = { .type = MATCH_FULL };
   4140	struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
   4141	int exclude_mod = 0;
   4142	int found = 0;
   4143	int ret;
   4144	int clear_filter = 0;
   4145
   4146	if (func) {
   4147		func_g.type = filter_parse_regex(func, len, &func_g.search,
   4148						 &clear_filter);
   4149		func_g.len = strlen(func_g.search);
   4150	}
   4151
   4152	if (mod) {
   4153		mod_g.type = filter_parse_regex(mod, strlen(mod),
   4154				&mod_g.search, &exclude_mod);
   4155		mod_g.len = strlen(mod_g.search);
   4156	}
   4157
   4158	mutex_lock(&ftrace_lock);
   4159
   4160	if (unlikely(ftrace_disabled))
   4161		goto out_unlock;
   4162
   4163	if (func_g.type == MATCH_INDEX) {
   4164		found = add_rec_by_index(hash, &func_g, clear_filter);
   4165		goto out_unlock;
   4166	}
   4167
   4168	do_for_each_ftrace_rec(pg, rec) {
   4169
   4170		if (rec->flags & FTRACE_FL_DISABLED)
   4171			continue;
   4172
   4173		if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
   4174			ret = enter_record(hash, rec, clear_filter);
   4175			if (ret < 0) {
   4176				found = ret;
   4177				goto out_unlock;
   4178			}
   4179			found = 1;
   4180		}
   4181	} while_for_each_ftrace_rec();
   4182 out_unlock:
   4183	mutex_unlock(&ftrace_lock);
   4184
   4185	return found;
   4186}
   4187
   4188static int
   4189ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
   4190{
   4191	return match_records(hash, buff, len, NULL);
   4192}
   4193
   4194static void ftrace_ops_update_code(struct ftrace_ops *ops,
   4195				   struct ftrace_ops_hash *old_hash)
   4196{
   4197	struct ftrace_ops *op;
   4198
   4199	if (!ftrace_enabled)
   4200		return;
   4201
   4202	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
   4203		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
   4204		return;
   4205	}
   4206
   4207	/*
   4208	 * If this is the shared global_ops filter, then we need to
    4209	 * check if another ops that shares it is enabled.
   4210	 * If so, we still need to run the modify code.
   4211	 */
   4212	if (ops->func_hash != &global_ops.local_hash)
   4213		return;
   4214
   4215	do_for_each_ftrace_op(op, ftrace_ops_list) {
   4216		if (op->func_hash == &global_ops.local_hash &&
   4217		    op->flags & FTRACE_OPS_FL_ENABLED) {
   4218			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
   4219			/* Only need to do this once */
   4220			return;
   4221		}
   4222	} while_for_each_ftrace_op(op);
   4223}
   4224
   4225static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
   4226					   struct ftrace_hash **orig_hash,
   4227					   struct ftrace_hash *hash,
   4228					   int enable)
   4229{
   4230	struct ftrace_ops_hash old_hash_ops;
   4231	struct ftrace_hash *old_hash;
   4232	int ret;
   4233
   4234	old_hash = *orig_hash;
   4235	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
   4236	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
   4237	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
   4238	if (!ret) {
   4239		ftrace_ops_update_code(ops, &old_hash_ops);
   4240		free_ftrace_hash_rcu(old_hash);
   4241	}
   4242	return ret;
   4243}
   4244
   4245static bool module_exists(const char *module)
   4246{
   4247	/* All modules have the symbol __this_module */
   4248	static const char this_mod[] = "__this_module";
   4249	char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
   4250	unsigned long val;
   4251	int n;
   4252
   4253	n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
   4254
   4255	if (n > sizeof(modname) - 1)
   4256		return false;
   4257
   4258	val = module_kallsyms_lookup_name(modname);
   4259	return val != 0;
   4260}
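
        /*
         * Editor's note (hedged example): module_exists("ext4") builds the
         * string "ext4:__this_module" and asks kallsyms for it; a nonzero
         * address means the module is already loaded.
         */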
   4261
   4262static int cache_mod(struct trace_array *tr,
   4263		     const char *func, char *module, int enable)
   4264{
   4265	struct ftrace_mod_load *ftrace_mod, *n;
   4266	struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
   4267	int ret;
   4268
   4269	mutex_lock(&ftrace_lock);
   4270
   4271	/* We do not cache inverse filters */
   4272	if (func[0] == '!') {
   4273		func++;
   4274		ret = -EINVAL;
   4275
   4276		/* Look to remove this hash */
   4277		list_for_each_entry_safe(ftrace_mod, n, head, list) {
   4278			if (strcmp(ftrace_mod->module, module) != 0)
   4279				continue;
   4280
   4281			/* no func matches all */
   4282			if (strcmp(func, "*") == 0 ||
   4283			    (ftrace_mod->func &&
   4284			     strcmp(ftrace_mod->func, func) == 0)) {
   4285				ret = 0;
   4286				free_ftrace_mod(ftrace_mod);
   4287				continue;
   4288			}
   4289		}
   4290		goto out;
   4291	}
   4292
   4293	ret = -EINVAL;
   4294	/* We only care about modules that have not been loaded yet */
   4295	if (module_exists(module))
   4296		goto out;
   4297
   4298	/* Save this string off, and execute it when the module is loaded */
   4299	ret = ftrace_add_mod(tr, func, module, enable);
   4300 out:
   4301	mutex_unlock(&ftrace_lock);
   4302
   4303	return ret;
   4304}
   4305
   4306static int
   4307ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
   4308		 int reset, int enable);
   4309
   4310#ifdef CONFIG_MODULES
   4311static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
   4312			     char *mod, bool enable)
   4313{
   4314	struct ftrace_mod_load *ftrace_mod, *n;
   4315	struct ftrace_hash **orig_hash, *new_hash;
   4316	LIST_HEAD(process_mods);
   4317	char *func;
   4318
   4319	mutex_lock(&ops->func_hash->regex_lock);
   4320
   4321	if (enable)
   4322		orig_hash = &ops->func_hash->filter_hash;
   4323	else
   4324		orig_hash = &ops->func_hash->notrace_hash;
   4325
   4326	new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
   4327					      *orig_hash);
   4328	if (!new_hash)
   4329		goto out; /* warn? */
   4330
   4331	mutex_lock(&ftrace_lock);
   4332
   4333	list_for_each_entry_safe(ftrace_mod, n, head, list) {
   4334
   4335		if (strcmp(ftrace_mod->module, mod) != 0)
   4336			continue;
   4337
   4338		if (ftrace_mod->func)
   4339			func = kstrdup(ftrace_mod->func, GFP_KERNEL);
   4340		else
   4341			func = kstrdup("*", GFP_KERNEL);
   4342
   4343		if (!func) /* warn? */
   4344			continue;
   4345
   4346		list_move(&ftrace_mod->list, &process_mods);
   4347
   4348		/* Use the newly allocated func, as it may be "*" */
   4349		kfree(ftrace_mod->func);
   4350		ftrace_mod->func = func;
   4351	}
   4352
   4353	mutex_unlock(&ftrace_lock);
   4354
   4355	list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
   4356
   4357		func = ftrace_mod->func;
   4358
   4359		/* Grabs ftrace_lock, which is why we have this extra step */
   4360		match_records(new_hash, func, strlen(func), mod);
   4361		free_ftrace_mod(ftrace_mod);
   4362	}
   4363
   4364	if (enable && list_empty(head))
   4365		new_hash->flags &= ~FTRACE_HASH_FL_MOD;
   4366
   4367	mutex_lock(&ftrace_lock);
   4368
   4369	ftrace_hash_move_and_update_ops(ops, orig_hash,
   4370					      new_hash, enable);
   4371	mutex_unlock(&ftrace_lock);
   4372
   4373 out:
   4374	mutex_unlock(&ops->func_hash->regex_lock);
   4375
   4376	free_ftrace_hash(new_hash);
   4377}
   4378
   4379static void process_cached_mods(const char *mod_name)
   4380{
   4381	struct trace_array *tr;
   4382	char *mod;
   4383
   4384	mod = kstrdup(mod_name, GFP_KERNEL);
   4385	if (!mod)
   4386		return;
   4387
   4388	mutex_lock(&trace_types_lock);
   4389	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
   4390		if (!list_empty(&tr->mod_trace))
   4391			process_mod_list(&tr->mod_trace, tr->ops, mod, true);
   4392		if (!list_empty(&tr->mod_notrace))
   4393			process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
   4394	}
   4395	mutex_unlock(&trace_types_lock);
   4396
   4397	kfree(mod);
   4398}
   4399#endif
   4400
   4401/*
   4402 * We register the module command as a template to show others how
    4403 * to register a command as well.
   4404 */
   4405
   4406static int
   4407ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
   4408		    char *func_orig, char *cmd, char *module, int enable)
   4409{
   4410	char *func;
   4411	int ret;
   4412
   4413	/* match_records() modifies func, and we need the original */
   4414	func = kstrdup(func_orig, GFP_KERNEL);
   4415	if (!func)
   4416		return -ENOMEM;
   4417
   4418	/*
   4419	 * cmd == 'mod' because we only registered this func
   4420	 * for the 'mod' ftrace_func_command.
   4421	 * But if you register one func with multiple commands,
   4422	 * you can tell which command was used by the cmd
   4423	 * parameter.
   4424	 */
   4425	ret = match_records(hash, func, strlen(func), module);
   4426	kfree(func);
   4427
   4428	if (!ret)
   4429		return cache_mod(tr, func_orig, module, enable);
   4430	if (ret < 0)
   4431		return ret;
   4432	return 0;
   4433}
   4434
   4435static struct ftrace_func_command ftrace_mod_cmd = {
   4436	.name			= "mod",
   4437	.func			= ftrace_mod_callback,
   4438};
   4439
   4440static int __init ftrace_mod_cmd_init(void)
   4441{
   4442	return register_ftrace_command(&ftrace_mod_cmd);
   4443}
   4444core_initcall(ftrace_mod_cmd_init);
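
        /*
         * Editor's sketch (hedged, not part of the kernel source): following
         * the template above to register another command. All my_* names are
         * hypothetical; the callback signature matches ftrace_mod_callback()
         * and registration must happen from __init context, per
         * register_ftrace_command() below. A user would then write e.g.
         * "write*:mycmd:param" into set_ftrace_filter.
         */
        #if 0
        static int
        my_cmd_callback(struct trace_array *tr, struct ftrace_hash *hash,
        		char *func, char *cmd, char *param, int enable)
        {
        	/* Select matching functions into @hash, as "mod" does */
        	int ret = match_records(hash, func, strlen(func), NULL);

        	if (ret < 0)
        		return ret;
        	return ret ? 0 : -EINVAL;	/* -EINVAL if nothing matched */
        }

        static struct ftrace_func_command my_cmd = {
        	.name	= "mycmd",
        	.func	= my_cmd_callback,
        };

        static int __init my_cmd_init(void)
        {
        	return register_ftrace_command(&my_cmd);
        }
        core_initcall(my_cmd_init);
        #endif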
   4445
   4446static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
   4447				      struct ftrace_ops *op, struct ftrace_regs *fregs)
   4448{
   4449	struct ftrace_probe_ops *probe_ops;
   4450	struct ftrace_func_probe *probe;
   4451
   4452	probe = container_of(op, struct ftrace_func_probe, ops);
   4453	probe_ops = probe->probe_ops;
   4454
   4455	/*
    4456	 * Disable preemption for these calls to prevent an RCU grace
    4457	 * period from completing. This syncs the hash iteration and freeing of items
   4458	 * on the hash. rcu_read_lock is too dangerous here.
   4459	 */
   4460	preempt_disable_notrace();
   4461	probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
   4462	preempt_enable_notrace();
   4463}
   4464
   4465struct ftrace_func_map {
   4466	struct ftrace_func_entry	entry;
   4467	void				*data;
   4468};
   4469
   4470struct ftrace_func_mapper {
   4471	struct ftrace_hash		hash;
   4472};
   4473
   4474/**
   4475 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
   4476 *
   4477 * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
   4478 */
   4479struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
   4480{
   4481	struct ftrace_hash *hash;
   4482
   4483	/*
   4484	 * The mapper is simply a ftrace_hash, but since the entries
   4485	 * in the hash are not ftrace_func_entry type, we define it
   4486	 * as a separate structure.
   4487	 */
   4488	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
   4489	return (struct ftrace_func_mapper *)hash;
   4490}
   4491
   4492/**
   4493 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
   4494 * @mapper: The mapper that has the ip maps
   4495 * @ip: the instruction pointer to find the data for
   4496 *
    4497 * Returns the data mapped to @ip if found, otherwise NULL. The return
   4498 * is actually the address of the mapper data pointer. The address is
   4499 * returned for use cases where the data is no bigger than a long, and
   4500 * the user can use the data pointer as its data instead of having to
   4501 * allocate more memory for the reference.
   4502 */
   4503void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
   4504				  unsigned long ip)
   4505{
   4506	struct ftrace_func_entry *entry;
   4507	struct ftrace_func_map *map;
   4508
   4509	entry = ftrace_lookup_ip(&mapper->hash, ip);
   4510	if (!entry)
   4511		return NULL;
   4512
   4513	map = (struct ftrace_func_map *)entry;
   4514	return &map->data;
   4515}
   4516
   4517/**
   4518 * ftrace_func_mapper_add_ip - Map some data to an ip
   4519 * @mapper: The mapper that has the ip maps
   4520 * @ip: The instruction pointer address to map @data to
   4521 * @data: The data to map to @ip
   4522 *
   4523 * Returns 0 on success otherwise an error.
   4524 */
   4525int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
   4526			      unsigned long ip, void *data)
   4527{
   4528	struct ftrace_func_entry *entry;
   4529	struct ftrace_func_map *map;
   4530
   4531	entry = ftrace_lookup_ip(&mapper->hash, ip);
   4532	if (entry)
   4533		return -EBUSY;
   4534
   4535	map = kmalloc(sizeof(*map), GFP_KERNEL);
   4536	if (!map)
   4537		return -ENOMEM;
   4538
   4539	map->entry.ip = ip;
   4540	map->data = data;
   4541
   4542	__add_hash_entry(&mapper->hash, &map->entry);
   4543
   4544	return 0;
   4545}
   4546
   4547/**
   4548 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
   4549 * @mapper: The mapper that has the ip maps
   4550 * @ip: The instruction pointer address to remove the data from
   4551 *
   4552 * Returns the data if it is found, otherwise NULL.
    4553 * Note, if the data pointer is used as the data itself (see
    4554 * ftrace_func_mapper_find_ip()), then the return value may be
    4555 * meaningless if the data pointer was set to zero.
   4556 */
   4557void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
   4558				   unsigned long ip)
   4559{
   4560	struct ftrace_func_entry *entry;
   4561	struct ftrace_func_map *map;
   4562	void *data;
   4563
   4564	entry = ftrace_lookup_ip(&mapper->hash, ip);
   4565	if (!entry)
   4566		return NULL;
   4567
   4568	map = (struct ftrace_func_map *)entry;
   4569	data = map->data;
   4570
   4571	remove_hash_entry(&mapper->hash, entry);
   4572	kfree(entry);
   4573
   4574	return data;
   4575}
   4576
   4577/**
   4578 * free_ftrace_func_mapper - free a mapping of ips and data
   4579 * @mapper: The mapper that has the ip maps
   4580 * @free_func: A function to be called on each data item.
   4581 *
   4582 * This is used to free the function mapper. The @free_func is optional
   4583 * and can be used if the data needs to be freed as well.
   4584 */
   4585void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
   4586			     ftrace_mapper_func free_func)
   4587{
   4588	struct ftrace_func_entry *entry;
   4589	struct ftrace_func_map *map;
   4590	struct hlist_head *hhd;
   4591	int size, i;
   4592
   4593	if (!mapper)
   4594		return;
   4595
   4596	if (free_func && mapper->hash.count) {
   4597		size = 1 << mapper->hash.size_bits;
   4598		for (i = 0; i < size; i++) {
   4599			hhd = &mapper->hash.buckets[i];
   4600			hlist_for_each_entry(entry, hhd, hlist) {
   4601				map = (struct ftrace_func_map *)entry;
   4602				free_func(map);
   4603			}
   4604		}
   4605	}
   4606	free_ftrace_hash(&mapper->hash);
   4607}
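
        /*
         * Editor's sketch (hedged, not part of the kernel source): the
         * mapper API above, end to end. The ip and the mapped values are
         * hypothetical.
         */
        #if 0
        static void example_mapper_usage(unsigned long ip)
        {
        	struct ftrace_func_mapper *mapper;
        	void **pdata;

        	mapper = allocate_ftrace_func_mapper();
        	if (!mapper)
        		return;

        	/* Store a small value in the pointer slot itself */
        	if (ftrace_func_mapper_add_ip(mapper, ip, (void *)1UL))
        		goto out;

        	pdata = ftrace_func_mapper_find_ip(mapper, ip);
        	if (pdata)
        		*pdata = (void *)2UL;	/* update in place */

        	ftrace_func_mapper_remove_ip(mapper, ip);
         out:
        	free_ftrace_func_mapper(mapper, NULL);	/* no per-item free_func */
        }
        #endif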
   4608
   4609static void release_probe(struct ftrace_func_probe *probe)
   4610{
   4611	struct ftrace_probe_ops *probe_ops;
   4612
   4613	mutex_lock(&ftrace_lock);
   4614
   4615	WARN_ON(probe->ref <= 0);
   4616
   4617	/* Subtract the ref that was used to protect this instance */
   4618	probe->ref--;
   4619
   4620	if (!probe->ref) {
   4621		probe_ops = probe->probe_ops;
   4622		/*
   4623		 * Sending zero as ip tells probe_ops to free
   4624		 * the probe->data itself
   4625		 */
   4626		if (probe_ops->free)
   4627			probe_ops->free(probe_ops, probe->tr, 0, probe->data);
   4628		list_del(&probe->list);
   4629		kfree(probe);
   4630	}
   4631	mutex_unlock(&ftrace_lock);
   4632}
   4633
   4634static void acquire_probe_locked(struct ftrace_func_probe *probe)
   4635{
   4636	/*
   4637	 * Add one ref to keep it from being freed when releasing the
   4638	 * ftrace_lock mutex.
   4639	 */
   4640	probe->ref++;
   4641}
   4642
   4643int
   4644register_ftrace_function_probe(char *glob, struct trace_array *tr,
   4645			       struct ftrace_probe_ops *probe_ops,
   4646			       void *data)
   4647{
   4648	struct ftrace_func_probe *probe = NULL, *iter;
   4649	struct ftrace_func_entry *entry;
   4650	struct ftrace_hash **orig_hash;
   4651	struct ftrace_hash *old_hash;
   4652	struct ftrace_hash *hash;
   4653	int count = 0;
   4654	int size;
   4655	int ret;
   4656	int i;
   4657
   4658	if (WARN_ON(!tr))
   4659		return -EINVAL;
   4660
   4661	/* We do not support '!' for function probes */
   4662	if (WARN_ON(glob[0] == '!'))
   4663		return -EINVAL;
   4664
   4665
   4666	mutex_lock(&ftrace_lock);
   4667	/* Check if the probe_ops is already registered */
   4668	list_for_each_entry(iter, &tr->func_probes, list) {
   4669		if (iter->probe_ops == probe_ops) {
   4670			probe = iter;
   4671			break;
   4672		}
   4673	}
   4674	if (!probe) {
   4675		probe = kzalloc(sizeof(*probe), GFP_KERNEL);
   4676		if (!probe) {
   4677			mutex_unlock(&ftrace_lock);
   4678			return -ENOMEM;
   4679		}
   4680		probe->probe_ops = probe_ops;
   4681		probe->ops.func = function_trace_probe_call;
   4682		probe->tr = tr;
   4683		ftrace_ops_init(&probe->ops);
   4684		list_add(&probe->list, &tr->func_probes);
   4685	}
   4686
   4687	acquire_probe_locked(probe);
   4688
   4689	mutex_unlock(&ftrace_lock);
   4690
   4691	/*
    4692	 * Note, there's a small window here in which the func_hash->filter_hash
    4693	 * may be NULL or empty, so be careful when reading the loop.
   4694	 */
   4695	mutex_lock(&probe->ops.func_hash->regex_lock);
   4696
   4697	orig_hash = &probe->ops.func_hash->filter_hash;
   4698	old_hash = *orig_hash;
   4699	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
   4700
   4701	if (!hash) {
   4702		ret = -ENOMEM;
   4703		goto out;
   4704	}
   4705
   4706	ret = ftrace_match_records(hash, glob, strlen(glob));
   4707
   4708	/* Nothing found? */
   4709	if (!ret)
   4710		ret = -EINVAL;
   4711
   4712	if (ret < 0)
   4713		goto out;
   4714
   4715	size = 1 << hash->size_bits;
   4716	for (i = 0; i < size; i++) {
   4717		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
   4718			if (ftrace_lookup_ip(old_hash, entry->ip))
   4719				continue;
   4720			/*
   4721			 * The caller might want to do something special
   4722			 * for each function we find. We call the callback
   4723			 * to give the caller an opportunity to do so.
   4724			 */
   4725			if (probe_ops->init) {
   4726				ret = probe_ops->init(probe_ops, tr,
   4727						      entry->ip, data,
   4728						      &probe->data);
   4729				if (ret < 0) {
   4730					if (probe_ops->free && count)
   4731						probe_ops->free(probe_ops, tr,
   4732								0, probe->data);
   4733					probe->data = NULL;
   4734					goto out;
   4735				}
   4736			}
   4737			count++;
   4738		}
   4739	}
   4740
   4741	mutex_lock(&ftrace_lock);
   4742
   4743	if (!count) {
   4744		/* Nothing was added? */
   4745		ret = -EINVAL;
   4746		goto out_unlock;
   4747	}
   4748
   4749	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
   4750					      hash, 1);
   4751	if (ret < 0)
   4752		goto err_unlock;
   4753
   4754	/* One ref for each new function traced */
   4755	probe->ref += count;
   4756
   4757	if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
   4758		ret = ftrace_startup(&probe->ops, 0);
   4759
   4760 out_unlock:
   4761	mutex_unlock(&ftrace_lock);
   4762
   4763	if (!ret)
   4764		ret = count;
   4765 out:
   4766	mutex_unlock(&probe->ops.func_hash->regex_lock);
   4767	free_ftrace_hash(hash);
   4768
   4769	release_probe(probe);
   4770
   4771	return ret;
   4772
   4773 err_unlock:
   4774	if (!probe_ops->free || !count)
   4775		goto out_unlock;
   4776
   4777	/* Failed to do the move, need to call the free functions */
   4778	for (i = 0; i < size; i++) {
   4779		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
   4780			if (ftrace_lookup_ip(old_hash, entry->ip))
   4781				continue;
   4782			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
   4783		}
   4784	}
   4785	goto out_unlock;
   4786}
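
        /*
         * Editor's sketch (hedged, not part of the kernel source): a minimal
         * probe_ops user. All my_* names are hypothetical; the callback
         * signature is the one function_trace_probe_call() invokes above,
         * and @tr must be a live trace_array (such as the one handed to a
         * ftrace_func_command callback).
         */
        #if 0
        static void
        my_probe_func(unsigned long ip, unsigned long parent_ip,
        	      struct trace_array *tr, struct ftrace_probe_ops *ops,
        	      void *data)
        {
        	/* Runs from the function tracer with preemption disabled */
        }

        static struct ftrace_probe_ops my_probe_ops = {
        	.func	= my_probe_func,
        };

        static int example_attach_probe(struct trace_array *tr)
        {
        	/* Attach the probe to every function starting with "vfs_" */
        	return register_ftrace_function_probe("vfs_*", tr,
        					      &my_probe_ops, NULL);
        }
        #endif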
   4787
   4788int
   4789unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
   4790				      struct ftrace_probe_ops *probe_ops)
   4791{
   4792	struct ftrace_func_probe *probe = NULL, *iter;
   4793	struct ftrace_ops_hash old_hash_ops;
   4794	struct ftrace_func_entry *entry;
   4795	struct ftrace_glob func_g;
   4796	struct ftrace_hash **orig_hash;
   4797	struct ftrace_hash *old_hash;
   4798	struct ftrace_hash *hash = NULL;
   4799	struct hlist_node *tmp;
   4800	struct hlist_head hhd;
   4801	char str[KSYM_SYMBOL_LEN];
   4802	int count = 0;
   4803	int i, ret = -ENODEV;
   4804	int size;
   4805
   4806	if (!glob || !strlen(glob) || !strcmp(glob, "*"))
   4807		func_g.search = NULL;
   4808	else {
   4809		int not;
   4810
   4811		func_g.type = filter_parse_regex(glob, strlen(glob),
   4812						 &func_g.search, &not);
   4813		func_g.len = strlen(func_g.search);
   4814
   4815		/* we do not support '!' for function probes */
   4816		if (WARN_ON(not))
   4817			return -EINVAL;
   4818	}
   4819
   4820	mutex_lock(&ftrace_lock);
   4821	/* Check if the probe_ops is already registered */
   4822	list_for_each_entry(iter, &tr->func_probes, list) {
   4823		if (iter->probe_ops == probe_ops) {
   4824			probe = iter;
   4825			break;
   4826		}
   4827	}
   4828	if (!probe)
   4829		goto err_unlock_ftrace;
   4830
   4831	ret = -EINVAL;
   4832	if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
   4833		goto err_unlock_ftrace;
   4834
   4835	acquire_probe_locked(probe);
   4836
   4837	mutex_unlock(&ftrace_lock);
   4838
   4839	mutex_lock(&probe->ops.func_hash->regex_lock);
   4840
   4841	orig_hash = &probe->ops.func_hash->filter_hash;
   4842	old_hash = *orig_hash;
   4843
   4844	if (ftrace_hash_empty(old_hash))
   4845		goto out_unlock;
   4846
   4847	old_hash_ops.filter_hash = old_hash;
   4848	/* Probes only have filters */
   4849	old_hash_ops.notrace_hash = NULL;
   4850
   4851	ret = -ENOMEM;
   4852	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
   4853	if (!hash)
   4854		goto out_unlock;
   4855
   4856	INIT_HLIST_HEAD(&hhd);
   4857
   4858	size = 1 << hash->size_bits;
   4859	for (i = 0; i < size; i++) {
   4860		hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
   4861
   4862			if (func_g.search) {
   4863				kallsyms_lookup(entry->ip, NULL, NULL,
   4864						NULL, str);
   4865				if (!ftrace_match(str, &func_g))
   4866					continue;
   4867			}
   4868			count++;
   4869			remove_hash_entry(hash, entry);
   4870			hlist_add_head(&entry->hlist, &hhd);
   4871		}
   4872	}
   4873
   4874	/* Nothing found? */
   4875	if (!count) {
   4876		ret = -EINVAL;
   4877		goto out_unlock;
   4878	}
   4879
   4880	mutex_lock(&ftrace_lock);
   4881
   4882	WARN_ON(probe->ref < count);
   4883
   4884	probe->ref -= count;
   4885
   4886	if (ftrace_hash_empty(hash))
   4887		ftrace_shutdown(&probe->ops, 0);
   4888
   4889	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
   4890					      hash, 1);
   4891
   4892	/* still need to update the function call sites */
   4893	if (ftrace_enabled && !ftrace_hash_empty(hash))
   4894		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
   4895				       &old_hash_ops);
   4896	synchronize_rcu();
   4897
   4898	hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
   4899		hlist_del(&entry->hlist);
   4900		if (probe_ops->free)
   4901			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
   4902		kfree(entry);
   4903	}
   4904	mutex_unlock(&ftrace_lock);
   4905
   4906 out_unlock:
   4907	mutex_unlock(&probe->ops.func_hash->regex_lock);
   4908	free_ftrace_hash(hash);
   4909
   4910	release_probe(probe);
   4911
   4912	return ret;
   4913
   4914 err_unlock_ftrace:
   4915	mutex_unlock(&ftrace_lock);
   4916	return ret;
   4917}
   4918
   4919void clear_ftrace_function_probes(struct trace_array *tr)
   4920{
   4921	struct ftrace_func_probe *probe, *n;
   4922
   4923	list_for_each_entry_safe(probe, n, &tr->func_probes, list)
   4924		unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
   4925}
   4926
   4927static LIST_HEAD(ftrace_commands);
   4928static DEFINE_MUTEX(ftrace_cmd_mutex);
   4929
   4930/*
   4931 * Currently we only register ftrace commands from __init, so mark this
   4932 * __init too.
   4933 */
   4934__init int register_ftrace_command(struct ftrace_func_command *cmd)
   4935{
   4936	struct ftrace_func_command *p;
   4937	int ret = 0;
   4938
   4939	mutex_lock(&ftrace_cmd_mutex);
   4940	list_for_each_entry(p, &ftrace_commands, list) {
   4941		if (strcmp(cmd->name, p->name) == 0) {
   4942			ret = -EBUSY;
   4943			goto out_unlock;
   4944		}
   4945	}
   4946	list_add(&cmd->list, &ftrace_commands);
   4947 out_unlock:
   4948	mutex_unlock(&ftrace_cmd_mutex);
   4949
   4950	return ret;
   4951}
   4952
   4953/*
   4954 * Currently we only unregister ftrace commands from __init, so mark
   4955 * this __init too.
   4956 */
   4957__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
   4958{
   4959	struct ftrace_func_command *p, *n;
   4960	int ret = -ENODEV;
   4961
   4962	mutex_lock(&ftrace_cmd_mutex);
   4963	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
   4964		if (strcmp(cmd->name, p->name) == 0) {
   4965			ret = 0;
   4966			list_del_init(&p->list);
   4967			goto out_unlock;
   4968		}
   4969	}
   4970 out_unlock:
   4971	mutex_unlock(&ftrace_cmd_mutex);
   4972
   4973	return ret;
   4974}
   4975
   4976static int ftrace_process_regex(struct ftrace_iterator *iter,
   4977				char *buff, int len, int enable)
   4978{
   4979	struct ftrace_hash *hash = iter->hash;
   4980	struct trace_array *tr = iter->ops->private;
   4981	char *func, *command, *next = buff;
   4982	struct ftrace_func_command *p;
   4983	int ret = -EINVAL;
   4984
   4985	func = strsep(&next, ":");
   4986
   4987	if (!next) {
   4988		ret = ftrace_match_records(hash, func, len);
   4989		if (!ret)
   4990			ret = -EINVAL;
   4991		if (ret < 0)
   4992			return ret;
   4993		return 0;
   4994	}
   4995
   4996	/* command found */
   4997
   4998	command = strsep(&next, ":");
   4999
   5000	mutex_lock(&ftrace_cmd_mutex);
   5001	list_for_each_entry(p, &ftrace_commands, list) {
   5002		if (strcmp(p->name, command) == 0) {
   5003			ret = p->func(tr, hash, func, command, next, enable);
   5004			goto out_unlock;
   5005		}
   5006	}
   5007 out_unlock:
   5008	mutex_unlock(&ftrace_cmd_mutex);
   5009
   5010	return ret;
   5011}
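
        /*
         * Editor's note (hedged example): writing "write*:mod:ext4" into
         * set_ftrace_filter arrives here as a single buffer. The two
         * strsep() calls above split it into func "write*", command "mod"
         * and the remaining parameter "ext4", and the "mod"
         * ftrace_func_command registered earlier does the rest.
         */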
   5012
   5013static ssize_t
   5014ftrace_regex_write(struct file *file, const char __user *ubuf,
   5015		   size_t cnt, loff_t *ppos, int enable)
   5016{
   5017	struct ftrace_iterator *iter;
   5018	struct trace_parser *parser;
   5019	ssize_t ret, read;
   5020
   5021	if (!cnt)
   5022		return 0;
   5023
   5024	if (file->f_mode & FMODE_READ) {
   5025		struct seq_file *m = file->private_data;
   5026		iter = m->private;
   5027	} else
   5028		iter = file->private_data;
   5029
   5030	if (unlikely(ftrace_disabled))
   5031		return -ENODEV;
   5032
   5033	/* iter->hash is a local copy, so we don't need regex_lock */
   5034
   5035	parser = &iter->parser;
   5036	read = trace_get_user(parser, ubuf, cnt, ppos);
   5037
   5038	if (read >= 0 && trace_parser_loaded(parser) &&
   5039	    !trace_parser_cont(parser)) {
   5040		ret = ftrace_process_regex(iter, parser->buffer,
   5041					   parser->idx, enable);
   5042		trace_parser_clear(parser);
   5043		if (ret < 0)
   5044			goto out;
   5045	}
   5046
   5047	ret = read;
   5048 out:
   5049	return ret;
   5050}
   5051
   5052ssize_t
   5053ftrace_filter_write(struct file *file, const char __user *ubuf,
   5054		    size_t cnt, loff_t *ppos)
   5055{
   5056	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
   5057}
   5058
   5059ssize_t
   5060ftrace_notrace_write(struct file *file, const char __user *ubuf,
   5061		     size_t cnt, loff_t *ppos)
   5062{
   5063	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
   5064}
   5065
   5066static int
   5067__ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
   5068{
   5069	struct ftrace_func_entry *entry;
   5070
   5071	ip = ftrace_location(ip);
   5072	if (!ip)
   5073		return -EINVAL;
   5074
   5075	if (remove) {
   5076		entry = ftrace_lookup_ip(hash, ip);
   5077		if (!entry)
   5078			return -ENOENT;
   5079		free_hash_entry(hash, entry);
   5080		return 0;
   5081	}
   5082
   5083	return add_hash_entry(hash, ip);
   5084}
   5085
   5086static int
   5087ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips,
   5088		  unsigned int cnt, int remove)
   5089{
   5090	unsigned int i;
   5091	int err;
   5092
   5093	for (i = 0; i < cnt; i++) {
   5094		err = __ftrace_match_addr(hash, ips[i], remove);
   5095		if (err) {
   5096			/*
    5097			 * This expects @hash to be a temporary hash; if this
    5098			 * fails, the caller must free @hash.
   5099			 */
   5100			return err;
   5101		}
   5102	}
   5103	return 0;
   5104}
   5105
   5106static int
   5107ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
   5108		unsigned long *ips, unsigned int cnt,
   5109		int remove, int reset, int enable)
   5110{
   5111	struct ftrace_hash **orig_hash;
   5112	struct ftrace_hash *hash;
   5113	int ret;
   5114
   5115	if (unlikely(ftrace_disabled))
   5116		return -ENODEV;
   5117
   5118	mutex_lock(&ops->func_hash->regex_lock);
   5119
   5120	if (enable)
   5121		orig_hash = &ops->func_hash->filter_hash;
   5122	else
   5123		orig_hash = &ops->func_hash->notrace_hash;
   5124
   5125	if (reset)
   5126		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
   5127	else
   5128		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
   5129
   5130	if (!hash) {
   5131		ret = -ENOMEM;
   5132		goto out_regex_unlock;
   5133	}
   5134
   5135	if (buf && !ftrace_match_records(hash, buf, len)) {
   5136		ret = -EINVAL;
   5137		goto out_regex_unlock;
   5138	}
   5139	if (ips) {
   5140		ret = ftrace_match_addr(hash, ips, cnt, remove);
   5141		if (ret < 0)
   5142			goto out_regex_unlock;
   5143	}
   5144
   5145	mutex_lock(&ftrace_lock);
   5146	ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
   5147	mutex_unlock(&ftrace_lock);
   5148
   5149 out_regex_unlock:
   5150	mutex_unlock(&ops->func_hash->regex_lock);
   5151
   5152	free_ftrace_hash(hash);
   5153	return ret;
   5154}
   5155
   5156static int
   5157ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt,
   5158		int remove, int reset, int enable)
   5159{
   5160	return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable);
   5161}
   5162
   5163#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
   5164
   5165struct ftrace_direct_func {
   5166	struct list_head	next;
   5167	unsigned long		addr;
   5168	int			count;
   5169};
   5170
   5171static LIST_HEAD(ftrace_direct_funcs);
   5172
   5173/**
    5174 * ftrace_find_direct_func - test whether an address is a registered direct caller
   5175 * @addr: The address of a registered direct caller
   5176 *
   5177 * This searches to see if a ftrace direct caller has been registered
   5178 * at a specific address, and if so, it returns a descriptor for it.
   5179 *
   5180 * This can be used by architecture code to see if an address is
   5181 * a direct caller (trampoline) attached to a fentry/mcount location.
   5182 * This is useful for the function_graph tracer, as it may need to
   5183 * do adjustments if it traced a location that also has a direct
   5184 * trampoline attached to it.
   5185 */
   5186struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
   5187{
   5188	struct ftrace_direct_func *entry;
   5189	bool found = false;
   5190
   5191	/* May be called by fgraph trampoline (protected by rcu tasks) */
   5192	list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) {
   5193		if (entry->addr == addr) {
   5194			found = true;
   5195			break;
   5196		}
   5197	}
   5198	if (found)
   5199		return entry;
   5200
   5201	return NULL;
   5202}
   5203
   5204static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
   5205{
   5206	struct ftrace_direct_func *direct;
   5207
   5208	direct = kmalloc(sizeof(*direct), GFP_KERNEL);
   5209	if (!direct)
   5210		return NULL;
   5211	direct->addr = addr;
   5212	direct->count = 0;
   5213	list_add_rcu(&direct->next, &ftrace_direct_funcs);
   5214	ftrace_direct_func_count++;
   5215	return direct;
   5216}
   5217
   5218/**
   5219 * register_ftrace_direct - Call a custom trampoline directly
   5220 * @ip: The address of the nop at the beginning of a function
   5221 * @addr: The address of the trampoline to call at @ip
   5222 *
   5223 * This is used to connect a direct call from the nop location (@ip)
   5224 * at the start of ftrace traced functions. The location that it calls
   5225 * (@addr) must be able to handle a direct call, and save the parameters
    5226 * (@addr) must be able to handle a direct call: save the parameters
    5227 * of the function being traced and restore them (or inject new ones
    5228 * if needed) before returning.
   5229 * Returns:
   5230 *  0 on success
   5231 *  -EBUSY - Another direct function is already attached (there can be only one)
   5232 *  -ENODEV - @ip does not point to a ftrace nop location (or not supported)
   5233 *  -ENOMEM - There was an allocation failure.
   5234 */
   5235int register_ftrace_direct(unsigned long ip, unsigned long addr)
   5236{
   5237	struct ftrace_direct_func *direct;
   5238	struct ftrace_func_entry *entry;
   5239	struct ftrace_hash *free_hash = NULL;
   5240	struct dyn_ftrace *rec;
   5241	int ret = -ENODEV;
   5242
   5243	mutex_lock(&direct_mutex);
   5244
   5245	ip = ftrace_location(ip);
   5246	if (!ip)
   5247		goto out_unlock;
   5248
   5249	/* See if there's a direct function at @ip already */
   5250	ret = -EBUSY;
   5251	if (ftrace_find_rec_direct(ip))
   5252		goto out_unlock;
   5253
   5254	ret = -ENODEV;
   5255	rec = lookup_rec(ip, ip);
   5256	if (!rec)
   5257		goto out_unlock;
   5258
   5259	/*
   5260	 * Check if the rec says it has a direct call but we didn't
   5261	 * find one earlier?
   5262	 */
   5263	if (WARN_ON(rec->flags & FTRACE_FL_DIRECT))
   5264		goto out_unlock;
   5265
   5266	/* Make sure the ip points to the exact record */
   5267	if (ip != rec->ip) {
   5268		ip = rec->ip;
   5269		/* Need to check this ip for a direct. */
   5270		if (ftrace_find_rec_direct(ip))
   5271			goto out_unlock;
   5272	}
   5273
   5274	ret = -ENOMEM;
   5275	direct = ftrace_find_direct_func(addr);
   5276	if (!direct) {
   5277		direct = ftrace_alloc_direct_func(addr);
   5278		if (!direct)
   5279			goto out_unlock;
   5280	}
   5281
   5282	entry = ftrace_add_rec_direct(ip, addr, &free_hash);
   5283	if (!entry)
   5284		goto out_unlock;
   5285
   5286	ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);
   5287
   5288	if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
   5289		ret = register_ftrace_function(&direct_ops);
   5290		if (ret)
   5291			ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
   5292	}
   5293
   5294	if (ret) {
   5295		remove_hash_entry(direct_functions, entry);
   5296		kfree(entry);
   5297		if (!direct->count) {
   5298			list_del_rcu(&direct->next);
   5299			synchronize_rcu_tasks();
   5300			kfree(direct);
   5301			if (free_hash)
   5302				free_ftrace_hash(free_hash);
   5303			free_hash = NULL;
   5304			ftrace_direct_func_count--;
   5305		}
   5306	} else {
   5307		direct->count++;
   5308	}
   5309 out_unlock:
   5310	mutex_unlock(&direct_mutex);
   5311
   5312	if (free_hash) {
   5313		synchronize_rcu_tasks();
   5314		free_ftrace_hash(free_hash);
   5315	}
   5316
   5317	return ret;
   5318}
   5319EXPORT_SYMBOL_GPL(register_ftrace_direct);
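
        /*
         * Editor's sketch (hedged, not part of the kernel source): attaching
         * and detaching a direct trampoline. Real trampolines are
         * architecture-specific assembly (see samples/ftrace/ in the kernel
         * tree); my_func and my_tramp are hypothetical.
         */
        #if 0
        extern void my_func(void);	/* a traceable kernel function */
        extern void my_tramp(void);	/* arch-specific asm trampoline */

        static int example_direct(void)
        {
        	int ret;

        	ret = register_ftrace_direct((unsigned long)my_func,
        				     (unsigned long)my_tramp);
        	if (ret)
        		return ret;

        	/* my_tramp is now called from my_func's fentry/mcount site */

        	return unregister_ftrace_direct((unsigned long)my_func,
        					(unsigned long)my_tramp);
        }
        #endif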
   5320
   5321static struct ftrace_func_entry *find_direct_entry(unsigned long *ip,
   5322						   struct dyn_ftrace **recp)
   5323{
   5324	struct ftrace_func_entry *entry;
   5325	struct dyn_ftrace *rec;
   5326
   5327	rec = lookup_rec(*ip, *ip);
   5328	if (!rec)
   5329		return NULL;
   5330
   5331	entry = __ftrace_lookup_ip(direct_functions, rec->ip);
   5332	if (!entry) {
   5333		WARN_ON(rec->flags & FTRACE_FL_DIRECT);
   5334		return NULL;
   5335	}
   5336
   5337	WARN_ON(!(rec->flags & FTRACE_FL_DIRECT));
   5338
    5339	/* The passed-in ip just needs to be somewhere on the call site */
   5340	*ip = rec->ip;
   5341
   5342	if (recp)
   5343		*recp = rec;
   5344
   5345	return entry;
   5346}
   5347
   5348int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
   5349{
   5350	struct ftrace_direct_func *direct;
   5351	struct ftrace_func_entry *entry;
   5352	struct ftrace_hash *hash;
   5353	int ret = -ENODEV;
   5354
   5355	mutex_lock(&direct_mutex);
   5356
   5357	ip = ftrace_location(ip);
   5358	if (!ip)
   5359		goto out_unlock;
   5360
   5361	entry = find_direct_entry(&ip, NULL);
   5362	if (!entry)
   5363		goto out_unlock;
   5364
   5365	hash = direct_ops.func_hash->filter_hash;
   5366	if (hash->count == 1)
   5367		unregister_ftrace_function(&direct_ops);
   5368
   5369	ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
   5370
   5371	WARN_ON(ret);
   5372
   5373	remove_hash_entry(direct_functions, entry);
   5374
   5375	direct = ftrace_find_direct_func(addr);
   5376	if (!WARN_ON(!direct)) {
   5377		/* This is the good path (see the ! before WARN) */
   5378		direct->count--;
   5379		WARN_ON(direct->count < 0);
   5380		if (!direct->count) {
   5381			list_del_rcu(&direct->next);
   5382			synchronize_rcu_tasks();
   5383			kfree(direct);
   5384			kfree(entry);
   5385			ftrace_direct_func_count--;
   5386		}
   5387	}
   5388 out_unlock:
   5389	mutex_unlock(&direct_mutex);
   5390
   5391	return ret;
   5392}
   5393EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
   5394
   5395static struct ftrace_ops stub_ops = {
   5396	.func		= ftrace_stub,
   5397};
   5398
   5399/**
   5400 * ftrace_modify_direct_caller - modify ftrace nop directly
   5401 * @entry: The ftrace hash entry of the direct helper for @rec
   5402 * @rec: The record representing the function site to patch
   5403 * @old_addr: The location that the site at @rec->ip currently calls
   5404 * @new_addr: The location that the site at @rec->ip should call
   5405 *
    5406 * An architecture may override this function to optimize the
   5407 * changing of the direct callback on an ftrace nop location.
   5408 * This is called with the ftrace_lock mutex held, and no other
   5409 * ftrace callbacks are on the associated record (@rec). Thus,
   5410 * it is safe to modify the ftrace record, where it should be
   5411 * currently calling @old_addr directly, to call @new_addr.
   5412 *
    5413 * Safety checks should be made to ensure that the code at
    5414 * @rec->ip is currently calling @old_addr, and this must
    5415 * also update entry->direct to @new_addr.
   5416 */
   5417int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
   5418				       struct dyn_ftrace *rec,
   5419				       unsigned long old_addr,
   5420				       unsigned long new_addr)
   5421{
   5422	unsigned long ip = rec->ip;
   5423	int ret;
   5424
   5425	/*
    5426	 * The ftrace_lock was used to determine if the record
    5427	 * had more than one registered user. If it did, we needed
    5428	 * to prevent that from changing in order to do the quick
    5429	 * switch. If it did not (only a direct caller was attached),
    5430	 * then this function is called. This function can deal with
    5431	 * other callers being attached to the rec while it runs, and,
    5432	 * since it uses standard ftrace calls that take the ftrace_lock
    5433	 * mutex, we need to release that lock here.
   5434	 */
   5435	mutex_unlock(&ftrace_lock);
   5436
   5437	/*
   5438	 * By setting a stub function at the same address, we force
   5439	 * the code to call the iterator and the direct_ops helper.
    5440	 * This means that @ip no longer uses the direct call, and
   5441	 * we can simply modify it.
   5442	 */
   5443	ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
   5444	if (ret)
   5445		goto out_lock;
   5446
   5447	ret = register_ftrace_function(&stub_ops);
   5448	if (ret) {
   5449		ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
   5450		goto out_lock;
   5451	}
   5452
   5453	entry->direct = new_addr;
   5454
   5455	/*
   5456	 * By removing the stub, we put back the direct call, calling
   5457	 * the @new_addr.
   5458	 */
   5459	unregister_ftrace_function(&stub_ops);
   5460	ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
   5461
   5462 out_lock:
   5463	mutex_lock(&ftrace_lock);
   5464
   5465	return ret;
   5466}
   5467
   5468/**
   5469 * modify_ftrace_direct - Modify an existing direct call to call something else
   5470 * @ip: The instruction pointer to modify
   5471 * @old_addr: The address that the current @ip calls directly
   5472 * @new_addr: The address that the @ip should call
   5473 *
   5474 * This modifies a ftrace direct caller at an instruction pointer without
   5475 * having to disable it first. The direct call will switch over to the
   5476 * @new_addr without missing anything.
   5477 *
    5478 * Returns: zero on success. Non-zero on error, which includes:
   5479 *  -ENODEV : the @ip given has no direct caller attached
   5480 *  -EINVAL : the @old_addr does not match the current direct caller
   5481 */
   5482int modify_ftrace_direct(unsigned long ip,
   5483			 unsigned long old_addr, unsigned long new_addr)
   5484{
   5485	struct ftrace_direct_func *direct, *new_direct = NULL;
   5486	struct ftrace_func_entry *entry;
   5487	struct dyn_ftrace *rec;
   5488	int ret = -ENODEV;
   5489
   5490	mutex_lock(&direct_mutex);
   5491
   5492	mutex_lock(&ftrace_lock);
   5493
   5494	ip = ftrace_location(ip);
   5495	if (!ip)
   5496		goto out_unlock;
   5497
   5498	entry = find_direct_entry(&ip, &rec);
   5499	if (!entry)
   5500		goto out_unlock;
   5501
   5502	ret = -EINVAL;
   5503	if (entry->direct != old_addr)
   5504		goto out_unlock;
   5505
   5506	direct = ftrace_find_direct_func(old_addr);
   5507	if (WARN_ON(!direct))
   5508		goto out_unlock;
   5509	if (direct->count > 1) {
   5510		ret = -ENOMEM;
   5511		new_direct = ftrace_alloc_direct_func(new_addr);
   5512		if (!new_direct)
   5513			goto out_unlock;
   5514		direct->count--;
   5515		new_direct->count++;
   5516	} else {
   5517		direct->addr = new_addr;
   5518	}
   5519
   5520	/*
   5521	 * If there's no other ftrace callback on the rec->ip location,
   5522	 * then it can be changed directly by the architecture.
   5523	 * If there is another caller, then we just need to change the
   5524	 * direct caller helper to point to @new_addr.
   5525	 */
   5526	if (ftrace_rec_count(rec) == 1) {
   5527		ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
   5528	} else {
   5529		entry->direct = new_addr;
   5530		ret = 0;
   5531	}
   5532
   5533	if (unlikely(ret && new_direct)) {
   5534		direct->count++;
   5535		list_del_rcu(&new_direct->next);
   5536		synchronize_rcu_tasks();
   5537		kfree(new_direct);
   5538		ftrace_direct_func_count--;
   5539	}
   5540
   5541 out_unlock:
   5542	mutex_unlock(&ftrace_lock);
   5543	mutex_unlock(&direct_mutex);
   5544	return ret;
   5545}
   5546EXPORT_SYMBOL_GPL(modify_ftrace_direct);
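
/*
 * Illustrative sketch (not part of this file): how a module might live-switch
 * a direct trampoline with modify_ftrace_direct(). The symbols my_tramp1,
 * my_tramp2 and the choice of wake_up_process() are assumptions for the
 * example; real trampolines must be arch-specific assembly that preserves
 * registers (see samples/ftrace/ftrace-direct-modify.c).
 */
#if 0 /* usage sketch only */
extern void my_tramp1(void);	/* assumed asm trampoline #1 */
extern void my_tramp2(void);	/* assumed asm trampoline #2 */

static int __init direct_modify_example_init(void)
{
	unsigned long ip = (unsigned long)wake_up_process;
	int ret;

	/* Attach the first trampoline as a direct call. */
	ret = register_ftrace_direct(ip, (unsigned long)my_tramp1);
	if (ret)
		return ret;

	/* Later: switch the same call site over without losing any hits. */
	ret = modify_ftrace_direct(ip, (unsigned long)my_tramp1,
				   (unsigned long)my_tramp2);
	if (ret)
		unregister_ftrace_direct(ip, (unsigned long)my_tramp1);
	return ret;
}
#endif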
   5547
   5548#define MULTI_FLAGS (FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_DIRECT | \
   5549		     FTRACE_OPS_FL_SAVE_REGS)
   5550
   5551static int check_direct_multi(struct ftrace_ops *ops)
   5552{
   5553	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
   5554		return -EINVAL;
   5555	if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
   5556		return -EINVAL;
   5557	return 0;
   5558}
   5559
   5560static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
   5561{
   5562	struct ftrace_func_entry *entry, *del;
   5563	int size, i;
   5564
   5565	size = 1 << hash->size_bits;
   5566	for (i = 0; i < size; i++) {
   5567		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
   5568			del = __ftrace_lookup_ip(direct_functions, entry->ip);
   5569			if (del && del->direct == addr) {
   5570				remove_hash_entry(direct_functions, del);
   5571				kfree(del);
   5572			}
   5573		}
   5574	}
   5575}
   5576
   5577/**
   5578 * register_ftrace_direct_multi - Call a custom trampoline directly
   5579 * for multiple functions registered in @ops
   5580 * @ops: The address of the struct ftrace_ops object
   5581 * @addr: The address of the trampoline to call at @ops functions
   5582 *
    5583 * This is used to connect direct calls to @addr from the nop locations
    5584 * of the functions registered in @ops (set by the ftrace_set_filter_ip
    5585 * function).
   5586 *
   5587 * The location that it calls (@addr) must be able to handle a direct call,
   5588 * and save the parameters of the function being traced, and restore them
   5589 * (or inject new ones if needed), before returning.
   5590 *
   5591 * Returns:
   5592 *  0 on success
    5593 *  -EINVAL  - The @ops object was already registered with this call,
    5594 *             or there are no functions in the @ops object.
    5595 *  -EBUSY   - Another direct function is already attached (there can be only one)
    5596 *  -ENODEV  - An ip in @ops does not point to an ftrace nop location (or is not supported)
   5597 *  -ENOMEM  - There was an allocation failure.
   5598 */
   5599int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
   5600{
   5601	struct ftrace_hash *hash, *free_hash = NULL;
   5602	struct ftrace_func_entry *entry, *new;
   5603	int err = -EBUSY, size, i;
   5604
   5605	if (ops->func || ops->trampoline)
   5606		return -EINVAL;
   5607	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
   5608		return -EINVAL;
   5609	if (ops->flags & FTRACE_OPS_FL_ENABLED)
   5610		return -EINVAL;
   5611
   5612	hash = ops->func_hash->filter_hash;
   5613	if (ftrace_hash_empty(hash))
   5614		return -EINVAL;
   5615
   5616	mutex_lock(&direct_mutex);
   5617
   5618	/* Make sure requested entries are not already registered.. */
   5619	size = 1 << hash->size_bits;
   5620	for (i = 0; i < size; i++) {
   5621		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
   5622			if (ftrace_find_rec_direct(entry->ip))
   5623				goto out_unlock;
   5624		}
   5625	}
   5626
    5627	/* ... and insert them into the direct_functions hash. */
   5628	err = -ENOMEM;
   5629	for (i = 0; i < size; i++) {
   5630		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
   5631			new = ftrace_add_rec_direct(entry->ip, addr, &free_hash);
   5632			if (!new)
   5633				goto out_remove;
   5634			entry->direct = addr;
   5635		}
   5636	}
   5637
   5638	ops->func = call_direct_funcs;
   5639	ops->flags = MULTI_FLAGS;
   5640	ops->trampoline = FTRACE_REGS_ADDR;
   5641
   5642	err = register_ftrace_function(ops);
   5643
   5644 out_remove:
   5645	if (err)
   5646		remove_direct_functions_hash(hash, addr);
   5647
   5648 out_unlock:
   5649	mutex_unlock(&direct_mutex);
   5650
   5651	if (free_hash) {
   5652		synchronize_rcu_tasks();
   5653		free_ftrace_hash(free_hash);
   5654	}
   5655	return err;
   5656}
   5657EXPORT_SYMBOL_GPL(register_ftrace_direct_multi);
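
/*
 * Illustrative sketch (not part of this file): attaching one direct
 * trampoline to several functions at once. The ops' filter hash is
 * populated first (which also initializes the ops); my_tramp and the
 * contents of the ip array are assumptions.
 */
#if 0 /* usage sketch only */
extern void my_tramp(void);	/* assumed asm trampoline */

static struct ftrace_ops direct_multi_ops;

static int example_attach(unsigned long *ips, unsigned int cnt)
{
	int ret;

	/* Select the functions; this also sets FTRACE_OPS_FL_INITIALIZED. */
	ret = ftrace_set_filter_ips(&direct_multi_ops, ips, cnt, 0, 0);
	if (ret)
		return ret;

	/* Every ip in the filter hash now calls my_tramp directly. */
	return register_ftrace_direct_multi(&direct_multi_ops,
					    (unsigned long)my_tramp);
}
#endif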
   5658
   5659/**
    5660 * unregister_ftrace_direct_multi - Remove calls to a custom trampoline
    5661 * previously registered by register_ftrace_direct_multi for the @ops object.
    5662 * @ops: The address of the struct ftrace_ops object
    5663 * @addr: The address of the direct function that is called by the @ops functions
    5664 *
    5665 * This is used to remove direct calls to @addr from the nop locations of
    5666 * the functions registered in @ops (set by the ftrace_set_filter_ip function).
   5667 *
   5668 * Returns:
   5669 *  0 on success
   5670 *  -EINVAL - The @ops object was not properly registered.
   5671 */
   5672int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
   5673{
   5674	struct ftrace_hash *hash = ops->func_hash->filter_hash;
   5675	int err;
   5676
   5677	if (check_direct_multi(ops))
   5678		return -EINVAL;
   5679	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
   5680		return -EINVAL;
   5681
   5682	mutex_lock(&direct_mutex);
   5683	err = unregister_ftrace_function(ops);
   5684	remove_direct_functions_hash(hash, addr);
   5685	mutex_unlock(&direct_mutex);
   5686
    5687	/* cleanup for a possible subsequent register call */
   5688	ops->func = NULL;
   5689	ops->trampoline = 0;
   5690	return err;
   5691}
   5692EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi);
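
/*
 * Illustrative sketch (not part of this file): teardown for the sketch
 * above. The ops must still be enabled and keep its filter hash intact
 * when this is called; afterwards it can be registered again.
 */
#if 0 /* usage sketch only */
static void example_detach(void)
{
	/* Removes the direct calls and disables direct_multi_ops. */
	unregister_ftrace_direct_multi(&direct_multi_ops,
				       (unsigned long)my_tramp);
}
#endif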
   5693
   5694/**
   5695 * modify_ftrace_direct_multi - Modify an existing direct 'multi' call
   5696 * to call something else
   5697 * @ops: The address of the struct ftrace_ops object
   5698 * @addr: The address of the new trampoline to call at @ops functions
   5699 *
    5700 * This is used to unregister the currently registered direct caller and
    5701 * register a new one (@addr) for the functions registered in the @ops object.
    5702 *
    5703 * Note there's a window between the ftrace_shutdown and ftrace_startup calls
    5704 * where no callbacks will be invoked.
    5705 *
    5706 * Returns: zero on success. Non-zero on error, which includes:
    5707 *  -EINVAL - The @ops object was not properly registered.
   5708 */
   5709int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
   5710{
   5711	struct ftrace_hash *hash;
   5712	struct ftrace_func_entry *entry, *iter;
   5713	static struct ftrace_ops tmp_ops = {
   5714		.func		= ftrace_stub,
   5715		.flags		= FTRACE_OPS_FL_STUB,
   5716	};
   5717	int i, size;
   5718	int err;
   5719
   5720	if (check_direct_multi(ops))
   5721		return -EINVAL;
   5722	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
   5723		return -EINVAL;
   5724
   5725	mutex_lock(&direct_mutex);
   5726
   5727	/* Enable the tmp_ops to have the same functions as the direct ops */
   5728	ftrace_ops_init(&tmp_ops);
   5729	tmp_ops.func_hash = ops->func_hash;
   5730
   5731	err = register_ftrace_function(&tmp_ops);
   5732	if (err)
   5733		goto out_direct;
   5734
   5735	/*
    5736	 * Now ftrace_ops_list_func() is called to handle the direct callers.
   5737	 * We can safely change the direct functions attached to each entry.
   5738	 */
   5739	mutex_lock(&ftrace_lock);
   5740
   5741	hash = ops->func_hash->filter_hash;
   5742	size = 1 << hash->size_bits;
   5743	for (i = 0; i < size; i++) {
   5744		hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
   5745			entry = __ftrace_lookup_ip(direct_functions, iter->ip);
   5746			if (!entry)
   5747				continue;
   5748			entry->direct = addr;
   5749		}
   5750	}
   5751
   5752	mutex_unlock(&ftrace_lock);
   5753
   5754	/* Removing the tmp_ops will add the updated direct callers to the functions */
   5755	unregister_ftrace_function(&tmp_ops);
   5756
   5757 out_direct:
   5758	mutex_unlock(&direct_mutex);
   5759	return err;
   5760}
   5761EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi);
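
/*
 * Illustrative sketch (not part of this file): redirecting every function
 * of a live direct-multi ops to a replacement trampoline. my_tramp_v2 is
 * an assumption.
 */
#if 0 /* usage sketch only */
extern void my_tramp_v2(void);	/* assumed replacement trampoline */

static int example_switch(void)
{
	/* All ips in direct_multi_ops's filter now call my_tramp_v2. */
	return modify_ftrace_direct_multi(&direct_multi_ops,
					  (unsigned long)my_tramp_v2);
}
#endif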
   5762#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
   5763
   5764/**
   5765 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
   5766 * @ops - the ops to set the filter with
   5767 * @ip - the address to add to or remove from the filter.
   5768 * @remove - non zero to remove the ip from the filter
   5769 * @reset - non zero to reset all filters before applying this filter.
   5770 *
    5771 * Filters denote which functions should be enabled when tracing is enabled.
    5772 * If @ip is NULL, the filter update fails.
   5773 */
   5774int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
   5775			 int remove, int reset)
   5776{
   5777	ftrace_ops_init(ops);
   5778	return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
   5779}
   5780EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
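
/*
 * Illustrative sketch (not part of this file): tracing a single function
 * by address. my_callback and my_ops are assumptions; the address would
 * typically come from a known symbol.
 */
#if 0 /* usage sketch only */
static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op,
				struct ftrace_regs *fregs)
{
	/* Called on entry to every function in my_ops's filter. */
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
};

static int example_trace_one(unsigned long ip)
{
	/* reset = 1: start from an empty filter, then add @ip. */
	int ret = ftrace_set_filter_ip(&my_ops, ip, 0, 1);

	if (ret)
		return ret;
	return register_ftrace_function(&my_ops);
}
#endif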
   5781
   5782/**
   5783 * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
   5784 * @ops - the ops to set the filter with
   5785 * @ips - the array of addresses to add to or remove from the filter.
   5786 * @cnt - the number of addresses in @ips
   5787 * @remove - non zero to remove ips from the filter
   5788 * @reset - non zero to reset all filters before applying this filter.
   5789 *
    5790 * Filters denote which functions should be enabled when tracing is enabled.
    5791 * If the @ips array or any ip within it is NULL, the filter update fails.
   5792 */
   5793int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
   5794			  unsigned int cnt, int remove, int reset)
   5795{
   5796	ftrace_ops_init(ops);
   5797	return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
   5798}
   5799EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
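
/*
 * Illustrative sketch (not part of this file): the batched variant,
 * reusing my_ops from the sketch above.
 */
#if 0 /* usage sketch only */
static int example_trace_many(unsigned long *ips, unsigned int cnt)
{
	/* reset = 1: replace any existing filter with this set of ips. */
	return ftrace_set_filter_ips(&my_ops, ips, cnt, 0, 1);
}

static int example_drop_one(unsigned long ip)
{
	/* remove = 1: take a single ip back out of the existing filter. */
	return ftrace_set_filter_ips(&my_ops, &ip, 1, 1, 0);
}
#endif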
   5800
   5801/**
   5802 * ftrace_ops_set_global_filter - setup ops to use global filters
   5803 * @ops - the ops which will use the global filters
   5804 *
   5805 * ftrace users who need global function trace filtering should call this.
   5806 * It can set the global filter only if ops were not initialized before.
   5807 */
   5808void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
   5809{
   5810	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
   5811		return;
   5812
   5813	ftrace_ops_init(ops);
   5814	ops->func_hash = &global_ops.local_hash;
   5815}
   5816EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
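
/*
 * Illustrative sketch (not part of this file): an ops that should honour
 * the global set_ftrace_filter/set_ftrace_notrace settings instead of
 * carrying its own hashes. my_global_cb is an assumed callback.
 */
#if 0 /* usage sketch only */
static struct ftrace_ops my_global_ops = {
	.func	= my_global_cb,	/* assumed, as in the sketch above */
};

static int example_use_global_filter(void)
{
	/* Must run before anything else initializes my_global_ops. */
	ftrace_ops_set_global_filter(&my_global_ops);
	return register_ftrace_function(&my_global_ops);
}
#endif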
   5817
   5818static int
   5819ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
   5820		 int reset, int enable)
   5821{
   5822	return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable);
   5823}
   5824
   5825/**
   5826 * ftrace_set_filter - set a function to filter on in ftrace
   5827 * @ops - the ops to set the filter with
   5828 * @buf - the string that holds the function filter text.
   5829 * @len - the length of the string.
   5830 * @reset - non zero to reset all filters before applying this filter.
   5831 *
   5832 * Filters denote which functions should be enabled when tracing is enabled.
   5833 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
   5834 */
   5835int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
   5836		       int len, int reset)
   5837{
   5838	ftrace_ops_init(ops);
   5839	return ftrace_set_regex(ops, buf, len, reset, 1);
   5840}
   5841EXPORT_SYMBOL_GPL(ftrace_set_filter);
   5842
   5843/**
   5844 * ftrace_set_notrace - set a function to not trace in ftrace
   5845 * @ops - the ops to set the notrace filter with
   5846 * @buf - the string that holds the function notrace text.
   5847 * @len - the length of the string.
   5848 * @reset - non zero to reset all filters before applying this filter.
   5849 *
   5850 * Notrace Filters denote which functions should not be enabled when tracing
   5851 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
   5852 * for tracing.
   5853 */
   5854int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
   5855			int len, int reset)
   5856{
   5857	ftrace_ops_init(ops);
   5858	return ftrace_set_regex(ops, buf, len, reset, 0);
   5859}
   5860EXPORT_SYMBOL_GPL(ftrace_set_notrace);
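
/*
 * Illustrative sketch (not part of this file): glob-based filtering with
 * the same syntax accepted by tracefs' set_ftrace_filter. my_ops is the
 * assumed ops from the earlier sketch.
 */
#if 0 /* usage sketch only */
static int example_glob_filter(void)
{
	char filter[] = "kmalloc*";
	char skip[] = "*debug*";
	int ret;

	/* Trace every function whose name starts with "kmalloc"... */
	ret = ftrace_set_filter(&my_ops, (unsigned char *)filter,
				strlen(filter), 1);
	if (ret)
		return ret;
	/* ...but never anything matching "*debug*". */
	return ftrace_set_notrace(&my_ops, (unsigned char *)skip,
				  strlen(skip), 1);
}
#endif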
   5861/**
   5862 * ftrace_set_global_filter - set a function to filter on with global tracers
   5863 * @buf - the string that holds the function filter text.
   5864 * @len - the length of the string.
   5865 * @reset - non zero to reset all filters before applying this filter.
   5866 *
   5867 * Filters denote which functions should be enabled when tracing is enabled.
   5868 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
   5869 */
   5870void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
   5871{
   5872	ftrace_set_regex(&global_ops, buf, len, reset, 1);
   5873}
   5874EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
   5875
   5876/**
   5877 * ftrace_set_global_notrace - set a function to not trace with global tracers
   5878 * @buf - the string that holds the function notrace text.
   5879 * @len - the length of the string.
   5880 * @reset - non zero to reset all filters before applying this filter.
   5881 *
   5882 * Notrace Filters denote which functions should not be enabled when tracing
   5883 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
   5884 * for tracing.
   5885 */
   5886void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
   5887{
   5888	ftrace_set_regex(&global_ops, buf, len, reset, 0);
   5889}
   5890EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
   5891
   5892/*
   5893 * command line interface to allow users to set filters on boot up.
   5894 */
   5895#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
   5896static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
   5897static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
   5898
    5899/* Used by the function selftest to skip the test if a filter was set at boot */
   5900bool ftrace_filter_param __initdata;
   5901
   5902static int __init set_ftrace_notrace(char *str)
   5903{
   5904	ftrace_filter_param = true;
   5905	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
   5906	return 1;
   5907}
   5908__setup("ftrace_notrace=", set_ftrace_notrace);
   5909
   5910static int __init set_ftrace_filter(char *str)
   5911{
   5912	ftrace_filter_param = true;
   5913	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
   5914	return 1;
   5915}
   5916__setup("ftrace_filter=", set_ftrace_filter);
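
/*
 * Illustrative boot command-line usage of the two parameters above
 * (examples, not part of this file); each takes comma-separated globs:
 *
 *	ftrace_filter=kmalloc*,kfree
 *	ftrace_notrace=*_raw_spin*
 */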
   5917
   5918#ifdef CONFIG_FUNCTION_GRAPH_TRACER
   5919static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
   5920static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
   5921static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
   5922
   5923static int __init set_graph_function(char *str)
   5924{
   5925	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
   5926	return 1;
   5927}
   5928__setup("ftrace_graph_filter=", set_graph_function);
   5929
   5930static int __init set_graph_notrace_function(char *str)
   5931{
   5932	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
   5933	return 1;
   5934}
   5935__setup("ftrace_graph_notrace=", set_graph_notrace_function);
   5936
   5937static int __init set_graph_max_depth_function(char *str)
   5938{
   5939	if (!str)
   5940		return 0;
   5941	fgraph_max_depth = simple_strtoul(str, NULL, 0);
   5942	return 1;
   5943}
   5944__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
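
/*
 * Illustrative boot command-line usage of the graph parameters above
 * (examples, not part of this file):
 *
 *	ftrace_graph_filter=do_sys_open
 *	ftrace_graph_notrace=*spin*
 *	ftrace_graph_max_depth=5
 */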
   5945
   5946static void __init set_ftrace_early_graph(char *buf, int enable)
   5947{
   5948	int ret;
   5949	char *func;
   5950	struct ftrace_hash *hash;
   5951
   5952	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
   5953	if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
   5954		return;
   5955
   5956	while (buf) {
   5957		func = strsep(&buf, ",");
   5958		/* we allow only one expression at a time */
   5959		ret = ftrace_graph_set_hash(hash, func);
   5960		if (ret)
   5961			printk(KERN_DEBUG "ftrace: function %s not "
   5962					  "traceable\n", func);
   5963	}
   5964
   5965	if (enable)
   5966		ftrace_graph_hash = hash;
   5967	else
   5968		ftrace_graph_notrace_hash = hash;
   5969}
   5970#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
   5971
   5972void __init
   5973ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
   5974{
   5975	char *func;
   5976
   5977	ftrace_ops_init(ops);
   5978
   5979	while (buf) {
   5980		func = strsep(&buf, ",");
   5981		ftrace_set_regex(ops, func, strlen(func), 0, enable);
   5982	}
   5983}
   5984
   5985static void __init set_ftrace_early_filters(void)
   5986{
   5987	if (ftrace_filter_buf[0])
   5988		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
   5989	if (ftrace_notrace_buf[0])
   5990		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
   5991#ifdef CONFIG_FUNCTION_GRAPH_TRACER
   5992	if (ftrace_graph_buf[0])
   5993		set_ftrace_early_graph(ftrace_graph_buf, 1);
   5994	if (ftrace_graph_notrace_buf[0])
   5995		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
   5996#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
   5997}
   5998
   5999int ftrace_regex_release(struct inode *inode, struct file *file)
   6000{
   6001	struct seq_file *m = (struct seq_file *)file->private_data;
   6002	struct ftrace_iterator *iter;
   6003	struct ftrace_hash **orig_hash;
   6004	struct trace_parser *parser;
   6005	int filter_hash;
   6006
   6007	if (file->f_mode & FMODE_READ) {
   6008		iter = m->private;
   6009		seq_release(inode, file);
   6010	} else
   6011		iter = file->private_data;
   6012
   6013	parser = &iter->parser;
   6014	if (trace_parser_loaded(parser)) {
   6015		int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
   6016
   6017		ftrace_process_regex(iter, parser->buffer,
   6018				     parser->idx, enable);
   6019	}
   6020
   6021	trace_parser_put(parser);
   6022
   6023	mutex_lock(&iter->ops->func_hash->regex_lock);
   6024
   6025	if (file->f_mode & FMODE_WRITE) {
   6026		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
   6027
   6028		if (filter_hash) {
   6029			orig_hash = &iter->ops->func_hash->filter_hash;
   6030			if (iter->tr && !list_empty(&iter->tr->mod_trace))
   6031				iter->hash->flags |= FTRACE_HASH_FL_MOD;
   6032		} else
   6033			orig_hash = &iter->ops->func_hash->notrace_hash;
   6034
   6035		mutex_lock(&ftrace_lock);
   6036		ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
   6037						      iter->hash, filter_hash);
   6038		mutex_unlock(&ftrace_lock);
   6039	} else {
   6040		/* For read only, the hash is the ops hash */
   6041		iter->hash = NULL;
   6042	}
   6043
   6044	mutex_unlock(&iter->ops->func_hash->regex_lock);
   6045	free_ftrace_hash(iter->hash);
   6046	if (iter->tr)
   6047		trace_array_put(iter->tr);
   6048	kfree(iter);
   6049
   6050	return 0;
   6051}
   6052
   6053static const struct file_operations ftrace_avail_fops = {
   6054	.open = ftrace_avail_open,
   6055	.read = seq_read,
   6056	.llseek = seq_lseek,
   6057	.release = seq_release_private,
   6058};
   6059
   6060static const struct file_operations ftrace_enabled_fops = {
   6061	.open = ftrace_enabled_open,
   6062	.read = seq_read,
   6063	.llseek = seq_lseek,
   6064	.release = seq_release_private,
   6065};
   6066
   6067static const struct file_operations ftrace_filter_fops = {
   6068	.open = ftrace_filter_open,
   6069	.read = seq_read,
   6070	.write = ftrace_filter_write,
   6071	.llseek = tracing_lseek,
   6072	.release = ftrace_regex_release,
   6073};
   6074
   6075static const struct file_operations ftrace_notrace_fops = {
   6076	.open = ftrace_notrace_open,
   6077	.read = seq_read,
   6078	.write = ftrace_notrace_write,
   6079	.llseek = tracing_lseek,
   6080	.release = ftrace_regex_release,
   6081};
   6082
   6083#ifdef CONFIG_FUNCTION_GRAPH_TRACER
   6084
   6085static DEFINE_MUTEX(graph_lock);
   6086
   6087struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
   6088struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
   6089
   6090enum graph_filter_type {
   6091	GRAPH_FILTER_NOTRACE	= 0,
   6092	GRAPH_FILTER_FUNCTION,
   6093};
   6094
   6095#define FTRACE_GRAPH_EMPTY	((void *)1)
   6096
   6097struct ftrace_graph_data {
   6098	struct ftrace_hash		*hash;
   6099	struct ftrace_func_entry	*entry;
   6100	int				idx;   /* for hash table iteration */
   6101	enum graph_filter_type		type;
   6102	struct ftrace_hash		*new_hash;
   6103	const struct seq_operations	*seq_ops;
   6104	struct trace_parser		parser;
   6105};
   6106
   6107static void *
   6108__g_next(struct seq_file *m, loff_t *pos)
   6109{
   6110	struct ftrace_graph_data *fgd = m->private;
   6111	struct ftrace_func_entry *entry = fgd->entry;
   6112	struct hlist_head *head;
   6113	int i, idx = fgd->idx;
   6114
   6115	if (*pos >= fgd->hash->count)
   6116		return NULL;
   6117
   6118	if (entry) {
   6119		hlist_for_each_entry_continue(entry, hlist) {
   6120			fgd->entry = entry;
   6121			return entry;
   6122		}
   6123
   6124		idx++;
   6125	}
   6126
   6127	for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
   6128		head = &fgd->hash->buckets[i];
   6129		hlist_for_each_entry(entry, head, hlist) {
   6130			fgd->entry = entry;
   6131			fgd->idx = i;
   6132			return entry;
   6133		}
   6134	}
   6135	return NULL;
   6136}
   6137
   6138static void *
   6139g_next(struct seq_file *m, void *v, loff_t *pos)
   6140{
   6141	(*pos)++;
   6142	return __g_next(m, pos);
   6143}
   6144
   6145static void *g_start(struct seq_file *m, loff_t *pos)
   6146{
   6147	struct ftrace_graph_data *fgd = m->private;
   6148
   6149	mutex_lock(&graph_lock);
   6150
   6151	if (fgd->type == GRAPH_FILTER_FUNCTION)
   6152		fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
   6153					lockdep_is_held(&graph_lock));
   6154	else
   6155		fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
   6156					lockdep_is_held(&graph_lock));
   6157
    6158	/* Empty hash: tell g_show to print that all functions are enabled */
   6159	if (ftrace_hash_empty(fgd->hash) && !*pos)
   6160		return FTRACE_GRAPH_EMPTY;
   6161
   6162	fgd->idx = 0;
   6163	fgd->entry = NULL;
   6164	return __g_next(m, pos);
   6165}
   6166
   6167static void g_stop(struct seq_file *m, void *p)
   6168{
   6169	mutex_unlock(&graph_lock);
   6170}
   6171
   6172static int g_show(struct seq_file *m, void *v)
   6173{
   6174	struct ftrace_func_entry *entry = v;
   6175
   6176	if (!entry)
   6177		return 0;
   6178
   6179	if (entry == FTRACE_GRAPH_EMPTY) {
   6180		struct ftrace_graph_data *fgd = m->private;
   6181
   6182		if (fgd->type == GRAPH_FILTER_FUNCTION)
   6183			seq_puts(m, "#### all functions enabled ####\n");
   6184		else
   6185			seq_puts(m, "#### no functions disabled ####\n");
   6186		return 0;
   6187	}
   6188
   6189	seq_printf(m, "%ps\n", (void *)entry->ip);
   6190
   6191	return 0;
   6192}
   6193
   6194static const struct seq_operations ftrace_graph_seq_ops = {
   6195	.start = g_start,
   6196	.next = g_next,
   6197	.stop = g_stop,
   6198	.show = g_show,
   6199};
   6200
   6201static int
   6202__ftrace_graph_open(struct inode *inode, struct file *file,
   6203		    struct ftrace_graph_data *fgd)
   6204{
   6205	int ret;
   6206	struct ftrace_hash *new_hash = NULL;
   6207
   6208	ret = security_locked_down(LOCKDOWN_TRACEFS);
   6209	if (ret)
   6210		return ret;
   6211
   6212	if (file->f_mode & FMODE_WRITE) {
   6213		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
   6214
   6215		if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
   6216			return -ENOMEM;
   6217
   6218		if (file->f_flags & O_TRUNC)
   6219			new_hash = alloc_ftrace_hash(size_bits);
   6220		else
   6221			new_hash = alloc_and_copy_ftrace_hash(size_bits,
   6222							      fgd->hash);
   6223		if (!new_hash) {
   6224			ret = -ENOMEM;
   6225			goto out;
   6226		}
   6227	}
   6228
   6229	if (file->f_mode & FMODE_READ) {
   6230		ret = seq_open(file, &ftrace_graph_seq_ops);
   6231		if (!ret) {
   6232			struct seq_file *m = file->private_data;
   6233			m->private = fgd;
   6234		} else {
   6235			/* Failed */
   6236			free_ftrace_hash(new_hash);
   6237			new_hash = NULL;
   6238		}
   6239	} else
   6240		file->private_data = fgd;
   6241
   6242out:
   6243	if (ret < 0 && file->f_mode & FMODE_WRITE)
   6244		trace_parser_put(&fgd->parser);
   6245
   6246	fgd->new_hash = new_hash;
   6247
   6248	/*
   6249	 * All uses of fgd->hash must be taken with the graph_lock
   6250	 * held. The graph_lock is going to be released, so force
   6251	 * fgd->hash to be reinitialized when it is taken again.
   6252	 */
   6253	fgd->hash = NULL;
   6254
   6255	return ret;
   6256}
   6257
   6258static int
   6259ftrace_graph_open(struct inode *inode, struct file *file)
   6260{
   6261	struct ftrace_graph_data *fgd;
   6262	int ret;
   6263
   6264	if (unlikely(ftrace_disabled))
   6265		return -ENODEV;
   6266
   6267	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
   6268	if (fgd == NULL)
   6269		return -ENOMEM;
   6270
   6271	mutex_lock(&graph_lock);
   6272
   6273	fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
   6274					lockdep_is_held(&graph_lock));
   6275	fgd->type = GRAPH_FILTER_FUNCTION;
   6276	fgd->seq_ops = &ftrace_graph_seq_ops;
   6277
   6278	ret = __ftrace_graph_open(inode, file, fgd);
   6279	if (ret < 0)
   6280		kfree(fgd);
   6281
   6282	mutex_unlock(&graph_lock);
   6283	return ret;
   6284}
   6285
   6286static int
   6287ftrace_graph_notrace_open(struct inode *inode, struct file *file)
   6288{
   6289	struct ftrace_graph_data *fgd;
   6290	int ret;
   6291
   6292	if (unlikely(ftrace_disabled))
   6293		return -ENODEV;
   6294
   6295	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
   6296	if (fgd == NULL)
   6297		return -ENOMEM;
   6298
   6299	mutex_lock(&graph_lock);
   6300
   6301	fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
   6302					lockdep_is_held(&graph_lock));
   6303	fgd->type = GRAPH_FILTER_NOTRACE;
   6304	fgd->seq_ops = &ftrace_graph_seq_ops;
   6305
   6306	ret = __ftrace_graph_open(inode, file, fgd);
   6307	if (ret < 0)
   6308		kfree(fgd);
   6309
   6310	mutex_unlock(&graph_lock);
   6311	return ret;
   6312}
   6313
   6314static int
   6315ftrace_graph_release(struct inode *inode, struct file *file)
   6316{
   6317	struct ftrace_graph_data *fgd;
   6318	struct ftrace_hash *old_hash, *new_hash;
   6319	struct trace_parser *parser;
   6320	int ret = 0;
   6321
   6322	if (file->f_mode & FMODE_READ) {
   6323		struct seq_file *m = file->private_data;
   6324
   6325		fgd = m->private;
   6326		seq_release(inode, file);
   6327	} else {
   6328		fgd = file->private_data;
   6329	}
   6330
   6331
   6332	if (file->f_mode & FMODE_WRITE) {
   6333
   6334		parser = &fgd->parser;
   6335
    6336		if (trace_parser_loaded(parser)) {
   6337			ret = ftrace_graph_set_hash(fgd->new_hash,
   6338						    parser->buffer);
   6339		}
   6340
   6341		trace_parser_put(parser);
   6342
   6343		new_hash = __ftrace_hash_move(fgd->new_hash);
   6344		if (!new_hash) {
   6345			ret = -ENOMEM;
   6346			goto out;
   6347		}
   6348
   6349		mutex_lock(&graph_lock);
   6350
   6351		if (fgd->type == GRAPH_FILTER_FUNCTION) {
   6352			old_hash = rcu_dereference_protected(ftrace_graph_hash,
   6353					lockdep_is_held(&graph_lock));
   6354			rcu_assign_pointer(ftrace_graph_hash, new_hash);
   6355		} else {
   6356			old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
   6357					lockdep_is_held(&graph_lock));
   6358			rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
   6359		}
   6360
   6361		mutex_unlock(&graph_lock);
   6362
   6363		/*
   6364		 * We need to do a hard force of sched synchronization.
    6365		 * This is because we use preempt_disable() for RCU protection,
    6366		 * but the function tracers can be called where RCU is not
    6367		 * watching (like before user_exit()). We cannot rely on the RCU
    6368		 * infrastructure to do the synchronization, thus we must do it
   6369		 * ourselves.
   6370		 */
   6371		if (old_hash != EMPTY_HASH)
   6372			synchronize_rcu_tasks_rude();
   6373
   6374		free_ftrace_hash(old_hash);
   6375	}
   6376
   6377 out:
   6378	free_ftrace_hash(fgd->new_hash);
   6379	kfree(fgd);
   6380
   6381	return ret;
   6382}
   6383
   6384static int
   6385ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
   6386{
   6387	struct ftrace_glob func_g;
   6388	struct dyn_ftrace *rec;
   6389	struct ftrace_page *pg;
   6390	struct ftrace_func_entry *entry;
   6391	int fail = 1;
   6392	int not;
   6393
   6394	/* decode regex */
   6395	func_g.type = filter_parse_regex(buffer, strlen(buffer),
   6396					 &func_g.search, &not);
   6397
   6398	func_g.len = strlen(func_g.search);
   6399
   6400	mutex_lock(&ftrace_lock);
   6401
   6402	if (unlikely(ftrace_disabled)) {
   6403		mutex_unlock(&ftrace_lock);
   6404		return -ENODEV;
   6405	}
   6406
   6407	do_for_each_ftrace_rec(pg, rec) {
   6408
   6409		if (rec->flags & FTRACE_FL_DISABLED)
   6410			continue;
   6411
   6412		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
   6413			entry = ftrace_lookup_ip(hash, rec->ip);
   6414
   6415			if (!not) {
   6416				fail = 0;
   6417
   6418				if (entry)
   6419					continue;
   6420				if (add_hash_entry(hash, rec->ip) < 0)
   6421					goto out;
   6422			} else {
   6423				if (entry) {
   6424					free_hash_entry(hash, entry);
   6425					fail = 0;
   6426				}
   6427			}
   6428		}
   6429	} while_for_each_ftrace_rec();
   6430out:
   6431	mutex_unlock(&ftrace_lock);
   6432
   6433	if (fail)
   6434		return -EINVAL;
   6435
   6436	return 0;
   6437}
   6438
   6439static ssize_t
   6440ftrace_graph_write(struct file *file, const char __user *ubuf,
   6441		   size_t cnt, loff_t *ppos)
   6442{
   6443	ssize_t read, ret = 0;
   6444	struct ftrace_graph_data *fgd = file->private_data;
   6445	struct trace_parser *parser;
   6446
   6447	if (!cnt)
   6448		return 0;
   6449
   6450	/* Read mode uses seq functions */
   6451	if (file->f_mode & FMODE_READ) {
   6452		struct seq_file *m = file->private_data;
   6453		fgd = m->private;
   6454	}
   6455
   6456	parser = &fgd->parser;
   6457
   6458	read = trace_get_user(parser, ubuf, cnt, ppos);
   6459
   6460	if (read >= 0 && trace_parser_loaded(parser) &&
   6461	    !trace_parser_cont(parser)) {
   6462
   6463		ret = ftrace_graph_set_hash(fgd->new_hash,
   6464					    parser->buffer);
   6465		trace_parser_clear(parser);
   6466	}
   6467
   6468	if (!ret)
   6469		ret = read;
   6470
   6471	return ret;
   6472}
   6473
   6474static const struct file_operations ftrace_graph_fops = {
   6475	.open		= ftrace_graph_open,
   6476	.read		= seq_read,
   6477	.write		= ftrace_graph_write,
   6478	.llseek		= tracing_lseek,
   6479	.release	= ftrace_graph_release,
   6480};
   6481
   6482static const struct file_operations ftrace_graph_notrace_fops = {
   6483	.open		= ftrace_graph_notrace_open,
   6484	.read		= seq_read,
   6485	.write		= ftrace_graph_write,
   6486	.llseek		= tracing_lseek,
   6487	.release	= ftrace_graph_release,
   6488};
   6489#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
   6490
   6491void ftrace_create_filter_files(struct ftrace_ops *ops,
   6492				struct dentry *parent)
   6493{
   6494
   6495	trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent,
   6496			  ops, &ftrace_filter_fops);
   6497
   6498	trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent,
   6499			  ops, &ftrace_notrace_fops);
   6500}
   6501
   6502/*
   6503 * The name "destroy_filter_files" is really a misnomer. Although
    6504 * it may actually delete the files in the future, this is
   6505 * really intended to make sure the ops passed in are disabled
   6506 * and that when this function returns, the caller is free to
   6507 * free the ops.
   6508 *
   6509 * The "destroy" name is only to match the "create" name that this
   6510 * should be paired with.
   6511 */
   6512void ftrace_destroy_filter_files(struct ftrace_ops *ops)
   6513{
   6514	mutex_lock(&ftrace_lock);
   6515	if (ops->flags & FTRACE_OPS_FL_ENABLED)
   6516		ftrace_shutdown(ops, 0);
   6517	ops->flags |= FTRACE_OPS_FL_DELETED;
   6518	ftrace_free_filter(ops);
   6519	mutex_unlock(&ftrace_lock);
   6520}
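
/*
 * Illustrative sketch (not part of this file): how a tracer might pair the
 * two calls above around the lifetime of its private ops and tracefs
 * directory. my_dir and my_tracer_ops are assumptions.
 */
#if 0 /* usage sketch only */
static void example_setup(struct dentry *my_dir, struct ftrace_ops *my_tracer_ops)
{
	/* Creates set_ftrace_filter and set_ftrace_notrace under my_dir. */
	ftrace_create_filter_files(my_tracer_ops, my_dir);
}

static void example_teardown(struct ftrace_ops *my_tracer_ops)
{
	/* Disables the ops; afterwards the caller may free it. */
	ftrace_destroy_filter_files(my_tracer_ops);
}
#endif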
   6521
   6522static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
   6523{
   6524
   6525	trace_create_file("available_filter_functions", TRACE_MODE_READ,
   6526			d_tracer, NULL, &ftrace_avail_fops);
   6527
   6528	trace_create_file("enabled_functions", TRACE_MODE_READ,
   6529			d_tracer, NULL, &ftrace_enabled_fops);
   6530
   6531	ftrace_create_filter_files(&global_ops, d_tracer);
   6532
   6533#ifdef CONFIG_FUNCTION_GRAPH_TRACER
   6534	trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer,
   6535				    NULL,
   6536				    &ftrace_graph_fops);
   6537	trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer,
   6538				    NULL,
   6539				    &ftrace_graph_notrace_fops);
   6540#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
   6541
   6542	return 0;
   6543}
   6544
   6545static int ftrace_cmp_ips(const void *a, const void *b)
   6546{
   6547	const unsigned long *ipa = a;
   6548	const unsigned long *ipb = b;
   6549
   6550	if (*ipa > *ipb)
   6551		return 1;
   6552	if (*ipa < *ipb)
   6553		return -1;
   6554	return 0;
   6555}
   6556
   6557#ifdef CONFIG_FTRACE_SORT_STARTUP_TEST
   6558static void test_is_sorted(unsigned long *start, unsigned long count)
   6559{
   6560	int i;
   6561
   6562	for (i = 1; i < count; i++) {
   6563		if (WARN(start[i - 1] > start[i],
   6564			 "[%d] %pS at %lx is not sorted with %pS at %lx\n", i,
   6565			 (void *)start[i - 1], start[i - 1],
   6566			 (void *)start[i], start[i]))
   6567			break;
   6568	}
   6569	if (i == count)
   6570		pr_info("ftrace section at %px sorted properly\n", start);
   6571}
   6572#else
   6573static void test_is_sorted(unsigned long *start, unsigned long count)
   6574{
   6575}
   6576#endif
   6577
   6578static int ftrace_process_locs(struct module *mod,
   6579			       unsigned long *start,
   6580			       unsigned long *end)
   6581{
   6582	struct ftrace_page *start_pg;
   6583	struct ftrace_page *pg;
   6584	struct dyn_ftrace *rec;
   6585	unsigned long count;
   6586	unsigned long *p;
   6587	unsigned long addr;
   6588	unsigned long flags = 0; /* Shut up gcc */
   6589	int ret = -ENOMEM;
   6590
   6591	count = end - start;
   6592
   6593	if (!count)
   6594		return 0;
   6595
   6596	/*
    6597	 * Sorting mcount in vmlinux at build time depends on
    6598	 * CONFIG_BUILDTIME_MCOUNT_SORT, while the mcount locs in
    6599	 * modules cannot be sorted at build time.
   6600	 */
   6601	if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) {
   6602		sort(start, count, sizeof(*start),
   6603		     ftrace_cmp_ips, NULL);
   6604	} else {
   6605		test_is_sorted(start, count);
   6606	}
   6607
   6608	start_pg = ftrace_allocate_pages(count);
   6609	if (!start_pg)
   6610		return -ENOMEM;
   6611
   6612	mutex_lock(&ftrace_lock);
   6613
   6614	/*
    6615	 * The core kernel and each module need their own pages, as
   6616	 * modules will free them when they are removed.
   6617	 * Force a new page to be allocated for modules.
   6618	 */
   6619	if (!mod) {
   6620		WARN_ON(ftrace_pages || ftrace_pages_start);
   6621		/* First initialization */
   6622		ftrace_pages = ftrace_pages_start = start_pg;
   6623	} else {
   6624		if (!ftrace_pages)
   6625			goto out;
   6626
   6627		if (WARN_ON(ftrace_pages->next)) {
   6628			/* Hmm, we have free pages? */
   6629			while (ftrace_pages->next)
   6630				ftrace_pages = ftrace_pages->next;
   6631		}
   6632
   6633		ftrace_pages->next = start_pg;
   6634	}
   6635
   6636	p = start;
   6637	pg = start_pg;
   6638	while (p < end) {
   6639		unsigned long end_offset;
   6640		addr = ftrace_call_adjust(*p++);
   6641		/*
   6642		 * Some architecture linkers will pad between
   6643		 * the different mcount_loc sections of different
   6644		 * object files to satisfy alignments.
   6645		 * Skip any NULL pointers.
   6646		 */
   6647		if (!addr)
   6648			continue;
   6649
   6650		end_offset = (pg->index+1) * sizeof(pg->records[0]);
   6651		if (end_offset > PAGE_SIZE << pg->order) {
   6652			/* We should have allocated enough */
   6653			if (WARN_ON(!pg->next))
   6654				break;
   6655			pg = pg->next;
   6656		}
   6657
   6658		rec = &pg->records[pg->index++];
   6659		rec->ip = addr;
   6660	}
   6661
   6662	/* We should have used all pages */
   6663	WARN_ON(pg->next);
   6664
   6665	/* Assign the last page to ftrace_pages */
   6666	ftrace_pages = pg;
   6667
   6668	/*
   6669	 * We only need to disable interrupts on start up
   6670	 * because we are modifying code that an interrupt
   6671	 * may execute, and the modification is not atomic.
   6672	 * But for modules, nothing runs the code we modify
   6673	 * until we are finished with it, and there's no
   6674	 * reason to cause large interrupt latencies while we do it.
   6675	 */
   6676	if (!mod)
   6677		local_irq_save(flags);
   6678	ftrace_update_code(mod, start_pg);
   6679	if (!mod)
   6680		local_irq_restore(flags);
   6681	ret = 0;
   6682 out:
   6683	mutex_unlock(&ftrace_lock);
   6684
   6685	return ret;
   6686}
   6687
   6688struct ftrace_mod_func {
   6689	struct list_head	list;
   6690	char			*name;
   6691	unsigned long		ip;
   6692	unsigned int		size;
   6693};
   6694
   6695struct ftrace_mod_map {
   6696	struct rcu_head		rcu;
   6697	struct list_head	list;
   6698	struct module		*mod;
   6699	unsigned long		start_addr;
   6700	unsigned long		end_addr;
   6701	struct list_head	funcs;
   6702	unsigned int		num_funcs;
   6703};
   6704
   6705static int ftrace_get_trampoline_kallsym(unsigned int symnum,
   6706					 unsigned long *value, char *type,
   6707					 char *name, char *module_name,
   6708					 int *exported)
   6709{
   6710	struct ftrace_ops *op;
   6711
   6712	list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
   6713		if (!op->trampoline || symnum--)
   6714			continue;
   6715		*value = op->trampoline;
   6716		*type = 't';
   6717		strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
   6718		strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
   6719		*exported = 0;
   6720		return 0;
   6721	}
   6722
   6723	return -ERANGE;
   6724}
   6725
   6726#ifdef CONFIG_MODULES
   6727
   6728#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
   6729
   6730static LIST_HEAD(ftrace_mod_maps);
   6731
   6732static int referenced_filters(struct dyn_ftrace *rec)
   6733{
   6734	struct ftrace_ops *ops;
   6735	int cnt = 0;
   6736
   6737	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
   6738		if (ops_references_rec(ops, rec)) {
   6739			if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
   6740				continue;
   6741			if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
   6742				continue;
   6743			cnt++;
   6744			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
   6745				rec->flags |= FTRACE_FL_REGS;
   6746			if (cnt == 1 && ops->trampoline)
   6747				rec->flags |= FTRACE_FL_TRAMP;
   6748			else
   6749				rec->flags &= ~FTRACE_FL_TRAMP;
   6750		}
   6751	}
   6752
   6753	return cnt;
   6754}
   6755
   6756static void
   6757clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
   6758{
   6759	struct ftrace_func_entry *entry;
   6760	struct dyn_ftrace *rec;
   6761	int i;
   6762
   6763	if (ftrace_hash_empty(hash))
   6764		return;
   6765
   6766	for (i = 0; i < pg->index; i++) {
   6767		rec = &pg->records[i];
   6768		entry = __ftrace_lookup_ip(hash, rec->ip);
   6769		/*
   6770		 * Do not allow this rec to match again.
   6771		 * Yeah, it may waste some memory, but will be removed
   6772		 * if/when the hash is modified again.
   6773		 */
   6774		if (entry)
   6775			entry->ip = 0;
   6776	}
   6777}
   6778
   6779/* Clear any records from hashes */
   6780static void clear_mod_from_hashes(struct ftrace_page *pg)
   6781{
   6782	struct trace_array *tr;
   6783
   6784	mutex_lock(&trace_types_lock);
   6785	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
   6786		if (!tr->ops || !tr->ops->func_hash)
   6787			continue;
   6788		mutex_lock(&tr->ops->func_hash->regex_lock);
   6789		clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
   6790		clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
   6791		mutex_unlock(&tr->ops->func_hash->regex_lock);
   6792	}
   6793	mutex_unlock(&trace_types_lock);
   6794}
   6795
   6796static void ftrace_free_mod_map(struct rcu_head *rcu)
   6797{
   6798	struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
   6799	struct ftrace_mod_func *mod_func;
   6800	struct ftrace_mod_func *n;
   6801
   6802	/* All the contents of mod_map are now not visible to readers */
   6803	list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
   6804		kfree(mod_func->name);
   6805		list_del(&mod_func->list);
   6806		kfree(mod_func);
   6807	}
   6808
   6809	kfree(mod_map);
   6810}
   6811
   6812void ftrace_release_mod(struct module *mod)
   6813{
   6814	struct ftrace_mod_map *mod_map;
   6815	struct ftrace_mod_map *n;
   6816	struct dyn_ftrace *rec;
   6817	struct ftrace_page **last_pg;
   6818	struct ftrace_page *tmp_page = NULL;
   6819	struct ftrace_page *pg;
   6820
   6821	mutex_lock(&ftrace_lock);
   6822
   6823	if (ftrace_disabled)
   6824		goto out_unlock;
   6825
   6826	list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
   6827		if (mod_map->mod == mod) {
   6828			list_del_rcu(&mod_map->list);
   6829			call_rcu(&mod_map->rcu, ftrace_free_mod_map);
   6830			break;
   6831		}
   6832	}
   6833
   6834	/*
   6835	 * Each module has its own ftrace_pages, remove
   6836	 * them from the list.
   6837	 */
   6838	last_pg = &ftrace_pages_start;
   6839	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
   6840		rec = &pg->records[0];
   6841		if (within_module_core(rec->ip, mod) ||
   6842		    within_module_init(rec->ip, mod)) {
   6843			/*
   6844			 * As core pages are first, the first
   6845			 * page should never be a module page.
   6846			 */
   6847			if (WARN_ON(pg == ftrace_pages_start))
   6848				goto out_unlock;
   6849
   6850			/* Check if we are deleting the last page */
   6851			if (pg == ftrace_pages)
   6852				ftrace_pages = next_to_ftrace_page(last_pg);
   6853
   6854			ftrace_update_tot_cnt -= pg->index;
   6855			*last_pg = pg->next;
   6856
   6857			pg->next = tmp_page;
   6858			tmp_page = pg;
   6859		} else
   6860			last_pg = &pg->next;
   6861	}
   6862 out_unlock:
   6863	mutex_unlock(&ftrace_lock);
   6864
   6865	for (pg = tmp_page; pg; pg = tmp_page) {
   6866
   6867		/* Needs to be called outside of ftrace_lock */
   6868		clear_mod_from_hashes(pg);
   6869
   6870		if (pg->records) {
   6871			free_pages((unsigned long)pg->records, pg->order);
   6872			ftrace_number_of_pages -= 1 << pg->order;
   6873		}
   6874		tmp_page = pg->next;
   6875		kfree(pg);
   6876		ftrace_number_of_groups--;
   6877	}
   6878}
   6879
   6880void ftrace_module_enable(struct module *mod)
   6881{
   6882	struct dyn_ftrace *rec;
   6883	struct ftrace_page *pg;
   6884
   6885	mutex_lock(&ftrace_lock);
   6886
   6887	if (ftrace_disabled)
   6888		goto out_unlock;
   6889
   6890	/*
   6891	 * If the tracing is enabled, go ahead and enable the record.
   6892	 *
   6893	 * The reason not to enable the record immediately is the
   6894	 * inherent check of ftrace_make_nop/ftrace_make_call for
    6895	 * correct previous instructions.  Doing the NOP conversion
    6896	 * first puts the module into the correct state, thus
    6897	 * passing the ftrace_make_call check.
    6898	 *
    6899	 * We also delay this until after the module code has already set
    6900	 * the text to read-only, as we now need to set it back to
    6901	 * read-write so that we can modify the text.
   6902	 */
   6903	if (ftrace_start_up)
   6904		ftrace_arch_code_modify_prepare();
   6905
   6906	do_for_each_ftrace_rec(pg, rec) {
   6907		int cnt;
   6908		/*
   6909		 * do_for_each_ftrace_rec() is a double loop.
   6910		 * module text shares the pg. If a record is
   6911		 * not part of this module, then skip this pg,
   6912		 * which the "break" will do.
   6913		 */
   6914		if (!within_module_core(rec->ip, mod) &&
   6915		    !within_module_init(rec->ip, mod))
   6916			break;
   6917
   6918		/* Weak functions should still be ignored */
   6919		if (!test_for_valid_rec(rec)) {
   6920			/* Clear all other flags. Should not be enabled anyway */
   6921			rec->flags = FTRACE_FL_DISABLED;
   6922			continue;
   6923		}
   6924
   6925		cnt = 0;
   6926
   6927		/*
   6928		 * When adding a module, we need to check if tracers are
   6929		 * currently enabled and if they are, and can trace this record,
   6930		 * we need to enable the module functions as well as update the
   6931		 * reference counts for those function records.
   6932		 */
   6933		if (ftrace_start_up)
   6934			cnt += referenced_filters(rec);
   6935
   6936		rec->flags &= ~FTRACE_FL_DISABLED;
   6937		rec->flags += cnt;
   6938
   6939		if (ftrace_start_up && cnt) {
   6940			int failed = __ftrace_replace_code(rec, 1);
   6941			if (failed) {
   6942				ftrace_bug(failed, rec);
   6943				goto out_loop;
   6944			}
   6945		}
   6946
   6947	} while_for_each_ftrace_rec();
   6948
   6949 out_loop:
   6950	if (ftrace_start_up)
   6951		ftrace_arch_code_modify_post_process();
   6952
   6953 out_unlock:
   6954	mutex_unlock(&ftrace_lock);
   6955
   6956	process_cached_mods(mod->name);
   6957}
   6958
   6959void ftrace_module_init(struct module *mod)
   6960{
   6961	int ret;
   6962
   6963	if (ftrace_disabled || !mod->num_ftrace_callsites)
   6964		return;
   6965
   6966	ret = ftrace_process_locs(mod, mod->ftrace_callsites,
   6967				  mod->ftrace_callsites + mod->num_ftrace_callsites);
   6968	if (ret)
   6969		pr_warn("ftrace: failed to allocate entries for module '%s' functions\n",
   6970			mod->name);
   6971}
   6972
   6973static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
   6974				struct dyn_ftrace *rec)
   6975{
   6976	struct ftrace_mod_func *mod_func;
   6977	unsigned long symsize;
   6978	unsigned long offset;
   6979	char str[KSYM_SYMBOL_LEN];
   6980	char *modname;
   6981	const char *ret;
   6982
   6983	ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
   6984	if (!ret)
   6985		return;
   6986
   6987	mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
   6988	if (!mod_func)
   6989		return;
   6990
   6991	mod_func->name = kstrdup(str, GFP_KERNEL);
   6992	if (!mod_func->name) {
   6993		kfree(mod_func);
   6994		return;
   6995	}
   6996
   6997	mod_func->ip = rec->ip - offset;
   6998	mod_func->size = symsize;
   6999
   7000	mod_map->num_funcs++;
   7001
   7002	list_add_rcu(&mod_func->list, &mod_map->funcs);
   7003}
   7004
   7005static struct ftrace_mod_map *
   7006allocate_ftrace_mod_map(struct module *mod,
   7007			unsigned long start, unsigned long end)
   7008{
   7009	struct ftrace_mod_map *mod_map;
   7010
   7011	mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
   7012	if (!mod_map)
   7013		return NULL;
   7014
   7015	mod_map->mod = mod;
   7016	mod_map->start_addr = start;
   7017	mod_map->end_addr = end;
   7018	mod_map->num_funcs = 0;
   7019
   7020	INIT_LIST_HEAD_RCU(&mod_map->funcs);
   7021
   7022	list_add_rcu(&mod_map->list, &ftrace_mod_maps);
   7023
   7024	return mod_map;
   7025}
   7026
   7027static const char *
   7028ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
   7029			   unsigned long addr, unsigned long *size,
   7030			   unsigned long *off, char *sym)
   7031{
   7032	struct ftrace_mod_func *found_func =  NULL;
   7033	struct ftrace_mod_func *mod_func;
   7034
   7035	list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
   7036		if (addr >= mod_func->ip &&
   7037		    addr < mod_func->ip + mod_func->size) {
   7038			found_func = mod_func;
   7039			break;
   7040		}
   7041	}
   7042
   7043	if (found_func) {
   7044		if (size)
   7045			*size = found_func->size;
   7046		if (off)
   7047			*off = addr - found_func->ip;
   7048		if (sym)
   7049			strlcpy(sym, found_func->name, KSYM_NAME_LEN);
   7050
   7051		return found_func->name;
   7052	}
   7053
   7054	return NULL;
   7055}
   7056
   7057const char *
   7058ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
   7059		   unsigned long *off, char **modname, char *sym)
   7060{
   7061	struct ftrace_mod_map *mod_map;
   7062	const char *ret = NULL;
   7063
   7064	/* mod_map is freed via call_rcu() */
   7065	preempt_disable();
   7066	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
   7067		ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
   7068		if (ret) {
   7069			if (modname)
   7070				*modname = mod_map->mod->name;
   7071			break;
   7072		}
   7073	}
   7074	preempt_enable();
   7075
   7076	return ret;
   7077}
   7078
   7079int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
   7080			   char *type, char *name,
   7081			   char *module_name, int *exported)
   7082{
   7083	struct ftrace_mod_map *mod_map;
   7084	struct ftrace_mod_func *mod_func;
   7085	int ret;
   7086
   7087	preempt_disable();
   7088	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
   7089
   7090		if (symnum >= mod_map->num_funcs) {
   7091			symnum -= mod_map->num_funcs;
   7092			continue;
   7093		}
   7094
   7095		list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
   7096			if (symnum > 1) {
   7097				symnum--;
   7098				continue;
   7099			}
   7100
   7101			*value = mod_func->ip;
   7102			*type = 'T';
   7103			strlcpy(name, mod_func->name, KSYM_NAME_LEN);
   7104			strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
   7105			*exported = 1;
   7106			preempt_enable();
   7107			return 0;
   7108		}
   7109		WARN_ON(1);
   7110		break;
   7111	}
   7112	ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
   7113					    module_name, exported);
   7114	preempt_enable();
   7115	return ret;
   7116}
   7117
   7118#else
   7119static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
   7120				struct dyn_ftrace *rec) { }
   7121static inline struct ftrace_mod_map *
   7122allocate_ftrace_mod_map(struct module *mod,
   7123			unsigned long start, unsigned long end)
   7124{
   7125	return NULL;
   7126}
   7127int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
   7128			   char *type, char *name, char *module_name,
   7129			   int *exported)
   7130{
   7131	int ret;
   7132
   7133	preempt_disable();
   7134	ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
   7135					    module_name, exported);
   7136	preempt_enable();
   7137	return ret;
   7138}
   7139#endif /* CONFIG_MODULES */
   7140
   7141struct ftrace_init_func {
   7142	struct list_head list;
   7143	unsigned long ip;
   7144};
   7145
   7146/* Clear any init ips from hashes */
   7147static void
   7148clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
   7149{
   7150	struct ftrace_func_entry *entry;
   7151
   7152	entry = ftrace_lookup_ip(hash, func->ip);
   7153	/*
   7154	 * Do not allow this rec to match again.
   7155	 * Yeah, it may waste some memory, but will be removed
   7156	 * if/when the hash is modified again.
   7157	 */
   7158	if (entry)
   7159		entry->ip = 0;
   7160}
   7161
   7162static void
   7163clear_func_from_hashes(struct ftrace_init_func *func)
   7164{
   7165	struct trace_array *tr;
   7166
   7167	mutex_lock(&trace_types_lock);
   7168	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
   7169		if (!tr->ops || !tr->ops->func_hash)
   7170			continue;
   7171		mutex_lock(&tr->ops->func_hash->regex_lock);
   7172		clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
   7173		clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
   7174		mutex_unlock(&tr->ops->func_hash->regex_lock);
   7175	}
   7176	mutex_unlock(&trace_types_lock);
   7177}
   7178
   7179static void add_to_clear_hash_list(struct list_head *clear_list,
   7180				   struct dyn_ftrace *rec)
   7181{
   7182	struct ftrace_init_func *func;
   7183
   7184	func = kmalloc(sizeof(*func), GFP_KERNEL);
   7185	if (!func) {
   7186		MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
   7187		return;
   7188	}
   7189
   7190	func->ip = rec->ip;
   7191	list_add(&func->list, clear_list);
   7192}
   7193
   7194void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
   7195{
   7196	unsigned long start = (unsigned long)(start_ptr);
   7197	unsigned long end = (unsigned long)(end_ptr);
   7198	struct ftrace_page **last_pg = &ftrace_pages_start;
   7199	struct ftrace_page *pg;
   7200	struct dyn_ftrace *rec;
   7201	struct dyn_ftrace key;
   7202	struct ftrace_mod_map *mod_map = NULL;
   7203	struct ftrace_init_func *func, *func_next;
   7204	struct list_head clear_hash;
   7205
   7206	INIT_LIST_HEAD(&clear_hash);
   7207
   7208	key.ip = start;
   7209	key.flags = end;	/* overload flags, as it is unsigned long */
   7210
   7211	mutex_lock(&ftrace_lock);
   7212
   7213	/*
   7214	 * If we are freeing module init memory, then check if
    7215	 * any tracer is active. If so, we need to save a mapping from
    7216	 * the addresses to the module functions being freed.
   7217	 */
   7218	if (mod && ftrace_ops_list != &ftrace_list_end)
   7219		mod_map = allocate_ftrace_mod_map(mod, start, end);
   7220
   7221	for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
   7222		if (end < pg->records[0].ip ||
   7223		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
   7224			continue;
   7225 again:
   7226		rec = bsearch(&key, pg->records, pg->index,
   7227			      sizeof(struct dyn_ftrace),
   7228			      ftrace_cmp_recs);
   7229		if (!rec)
   7230			continue;
   7231
   7232		/* rec will be cleared from hashes after ftrace_lock unlock */
   7233		add_to_clear_hash_list(&clear_hash, rec);
   7234
   7235		if (mod_map)
   7236			save_ftrace_mod_rec(mod_map, rec);
   7237
   7238		pg->index--;
   7239		ftrace_update_tot_cnt--;
   7240		if (!pg->index) {
   7241			*last_pg = pg->next;
   7242			if (pg->records) {
   7243				free_pages((unsigned long)pg->records, pg->order);
   7244				ftrace_number_of_pages -= 1 << pg->order;
   7245			}
   7246			ftrace_number_of_groups--;
   7247			kfree(pg);
   7248			pg = container_of(last_pg, struct ftrace_page, next);
   7249			if (!(*last_pg))
   7250				ftrace_pages = pg;
   7251			continue;
   7252		}
   7253		memmove(rec, rec + 1,
   7254			(pg->index - (rec - pg->records)) * sizeof(*rec));
   7255		/* More than one function may be in this block */
   7256		goto again;
   7257	}
   7258	mutex_unlock(&ftrace_lock);
   7259
   7260	list_for_each_entry_safe(func, func_next, &clear_hash, list) {
   7261		clear_func_from_hashes(func);
   7262		kfree(func);
   7263	}
   7264}
   7265
   7266void __init ftrace_free_init_mem(void)
   7267{
   7268	void *start = (void *)(&__init_begin);
   7269	void *end = (void *)(&__init_end);
   7270
   7271	ftrace_boot_snapshot();
   7272
   7273	ftrace_free_mem(NULL, start, end);
   7274}
   7275
   7276int __init __weak ftrace_dyn_arch_init(void)
   7277{
   7278	return 0;
   7279}
   7280
   7281void __init ftrace_init(void)
   7282{
   7283	extern unsigned long __start_mcount_loc[];
   7284	extern unsigned long __stop_mcount_loc[];
   7285	unsigned long count, flags;
   7286	int ret;
   7287
   7288	local_irq_save(flags);
   7289	ret = ftrace_dyn_arch_init();
   7290	local_irq_restore(flags);
   7291	if (ret)
   7292		goto failed;
   7293
   7294	count = __stop_mcount_loc - __start_mcount_loc;
   7295	if (!count) {
   7296		pr_info("ftrace: No functions to be traced?\n");
   7297		goto failed;
   7298	}
   7299
   7300	pr_info("ftrace: allocating %ld entries in %ld pages\n",
   7301		count, count / ENTRIES_PER_PAGE + 1);
   7302
   7303	ret = ftrace_process_locs(NULL,
   7304				  __start_mcount_loc,
   7305				  __stop_mcount_loc);
   7306	if (ret) {
   7307		pr_warn("ftrace: failed to allocate entries for functions\n");
   7308		goto failed;
   7309	}
   7310
   7311	pr_info("ftrace: allocated %ld pages with %ld groups\n",
   7312		ftrace_number_of_pages, ftrace_number_of_groups);
   7313
   7314	last_ftrace_enabled = ftrace_enabled = 1;
   7315
   7316	set_ftrace_early_filters();
   7317
   7318	return;
   7319 failed:
   7320	ftrace_disabled = 1;
   7321}
   7322
   7323/* Do nothing if arch does not support this */
   7324void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
   7325{
   7326}
   7327
   7328static void ftrace_update_trampoline(struct ftrace_ops *ops)
   7329{
   7330	unsigned long trampoline = ops->trampoline;
   7331
   7332	arch_ftrace_update_trampoline(ops);
   7333	if (ops->trampoline && ops->trampoline != trampoline &&
   7334	    (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
   7335		/* Add to kallsyms before the perf events */
   7336		ftrace_add_trampoline_to_kallsyms(ops);
   7337		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
   7338				   ops->trampoline, ops->trampoline_size, false,
   7339				   FTRACE_TRAMPOLINE_SYM);
   7340		/*
   7341		 * Record the perf text poke event after the ksymbol register
   7342		 * event.
   7343		 */
   7344		perf_event_text_poke((void *)ops->trampoline, NULL, 0,
   7345				     (void *)ops->trampoline,
   7346				     ops->trampoline_size);
   7347	}
   7348}
   7349
   7350void ftrace_init_trace_array(struct trace_array *tr)
   7351{
   7352	INIT_LIST_HEAD(&tr->func_probes);
   7353	INIT_LIST_HEAD(&tr->mod_trace);
   7354	INIT_LIST_HEAD(&tr->mod_notrace);
   7355}
   7356#else
   7357
   7358struct ftrace_ops global_ops = {
   7359	.func			= ftrace_stub,
   7360	.flags			= FTRACE_OPS_FL_INITIALIZED |
   7361				  FTRACE_OPS_FL_PID,
   7362};
   7363
   7364static int __init ftrace_nodyn_init(void)
   7365{
   7366	ftrace_enabled = 1;
   7367	return 0;
   7368}
   7369core_initcall(ftrace_nodyn_init);
   7370
   7371static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
   7372static inline void ftrace_startup_all(int command) { }
   7373
   7374static void ftrace_update_trampoline(struct ftrace_ops *ops)
   7375{
   7376}
   7377
   7378#endif /* CONFIG_DYNAMIC_FTRACE */
   7379
   7380__init void ftrace_init_global_array_ops(struct trace_array *tr)
   7381{
   7382	tr->ops = &global_ops;
   7383	tr->ops->private = tr;
   7384	ftrace_init_trace_array(tr);
   7385}
   7386
   7387void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
   7388{
    7389	/* The global array's ops func should still be the default stub here */
   7390	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
   7391		if (WARN_ON(tr->ops->func != ftrace_stub))
   7392			printk("ftrace ops had %pS for function\n",
   7393			       tr->ops->func);
   7394	}
   7395	tr->ops->func = func;
   7396	tr->ops->private = tr;
   7397}
   7398
   7399void ftrace_reset_array_ops(struct trace_array *tr)
   7400{
   7401	tr->ops->func = ftrace_stub;
   7402}
   7403
   7404static nokprobe_inline void
   7405__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
   7406		       struct ftrace_ops *ignored, struct ftrace_regs *fregs)
   7407{
   7408	struct pt_regs *regs = ftrace_get_regs(fregs);
   7409	struct ftrace_ops *op;
   7410	int bit;
   7411
    7412	/*
    7413	 * trace_test_and_set_recursion() will disable preemption,
    7414	 * which is required since some of the ops may be dynamically
    7415	 * allocated; they must be freed after a synchronize_rcu().
    7416	 */
   7417	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
   7418	if (bit < 0)
   7419		return;
   7420
   7421	do_for_each_ftrace_op(op, ftrace_ops_list) {
    7422		/* Stub functions don't need to be called or tested */
   7423		if (op->flags & FTRACE_OPS_FL_STUB)
   7424			continue;
    7425		/*
    7426		 * Check the following for each ops before calling its func:
    7427		 *
    7428		 *  - if the RCU flag is set, then rcu_is_watching() must be
    7429		 *    true
    7430		 *  - otherwise, test if the ip matches the ops filter
    7431		 *
    7432		 * If either check fails then op->func() is not executed.
    7433		 */
   7434		if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
   7435		    ftrace_ops_test(op, ip, regs)) {
   7436			if (FTRACE_WARN_ON(!op->func)) {
   7437				pr_warn("op=%p %pS\n", op, op);
   7438				goto out;
   7439			}
   7440			op->func(ip, parent_ip, op, fregs);
   7441		}
   7442	} while_for_each_ftrace_op(op);
   7443out:
   7444	trace_clear_recursion(bit);
   7445}
   7446
   7447/*
   7448 * Some archs only support passing ip and parent_ip. Even though
   7449 * the list function ignores the op parameter, we do not want any
   7450 * C side effects, where a function is called without the caller
   7451 * sending a third parameter.
    7452 * Archs are expected to support both regs and ftrace_ops at the same
    7453 * time. If they support ftrace_ops, it is assumed they also support regs.
    7454 * If callbacks want to use regs, they must either check for regs being
    7455 * NULL or depend on CONFIG_DYNAMIC_FTRACE_WITH_REGS; see the sketch below.
   7456 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
   7457 * An architecture can pass partial regs with ftrace_ops and still
   7458 * set the ARCH_SUPPORTS_FTRACE_OPS.
   7459 *
   7460 * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be
   7461 * arch_ftrace_ops_list_func.
   7462 */
   7463#if ARCH_SUPPORTS_FTRACE_OPS
   7464void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
   7465			       struct ftrace_ops *op, struct ftrace_regs *fregs)
   7466{
   7467	__ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
   7468}
   7469#else
   7470void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
   7471{
   7472	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
   7473}
   7474#endif
   7475NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
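/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a callback that wants pt_regs must tolerate ftrace_get_regs() returning
 * NULL, per the comment above. The name my_regs_callback is hypothetical.
 */
static void my_regs_callback(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (!regs)
		return;		/* arch saved only partial regs (or none) */

	/* full pt_regs are valid from here on */
}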
   7476
    7477/*
    7478 * If there's only one function registered but it does not support
    7479 * recursion or needs RCU protection, then this function
    7480 * will be called by the mcount trampoline.
    7481 */
   7482static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
   7483				   struct ftrace_ops *op, struct ftrace_regs *fregs)
   7484{
   7485	int bit;
   7486
   7487	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
   7488	if (bit < 0)
   7489		return;
   7490
   7491	if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
   7492		op->func(ip, parent_ip, op, fregs);
   7493
   7494	trace_clear_recursion(bit);
   7495}
   7496NOKPROBE_SYMBOL(ftrace_ops_assist_func);
   7497
   7498/**
   7499 * ftrace_ops_get_func - get the function a trampoline should call
   7500 * @ops: the ops to get the function for
   7501 *
   7502 * Normally the mcount trampoline will call the ops->func, but there
   7503 * are times that it should not. For example, if the ops does not
   7504 * have its own recursion protection, then it should call the
   7505 * ftrace_ops_assist_func() instead.
   7506 *
   7507 * Returns the function that the trampoline should call for @ops.
   7508 */
   7509ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
   7510{
   7511	/*
   7512	 * If the function does not handle recursion or needs to be RCU safe,
   7513	 * then we need to call the assist handler.
   7514	 */
   7515	if (ops->flags & (FTRACE_OPS_FL_RECURSION |
   7516			  FTRACE_OPS_FL_RCU))
   7517		return ftrace_ops_assist_func;
   7518
   7519	return ops->func;
   7520}
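/*
 * Illustrative example (editor's addition; my_ops and my_func are
 * hypothetical names): an ops declared as
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_func,
 *		.flags	= FTRACE_OPS_FL_RECURSION,
 *	};
 *
 * asks ftrace to provide recursion protection, so ftrace_ops_get_func()
 * hands the trampoline ftrace_ops_assist_func() and my_func runs inside
 * the guard above. Clearing the flag (and not setting FTRACE_OPS_FL_RCU)
 * means my_func is called directly.
 */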
   7521
   7522static void
   7523ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
   7524				     struct task_struct *prev,
   7525				     struct task_struct *next,
   7526				     unsigned int prev_state)
   7527{
   7528	struct trace_array *tr = data;
   7529	struct trace_pid_list *pid_list;
   7530	struct trace_pid_list *no_pid_list;
   7531
   7532	pid_list = rcu_dereference_sched(tr->function_pids);
   7533	no_pid_list = rcu_dereference_sched(tr->function_no_pids);
   7534
   7535	if (trace_ignore_this_task(pid_list, no_pid_list, next))
   7536		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
   7537			       FTRACE_PID_IGNORE);
   7538	else
   7539		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
   7540			       next->pid);
   7541}
   7542
   7543static void
   7544ftrace_pid_follow_sched_process_fork(void *data,
   7545				     struct task_struct *self,
   7546				     struct task_struct *task)
   7547{
   7548	struct trace_pid_list *pid_list;
   7549	struct trace_array *tr = data;
   7550
   7551	pid_list = rcu_dereference_sched(tr->function_pids);
   7552	trace_filter_add_remove_task(pid_list, self, task);
   7553
   7554	pid_list = rcu_dereference_sched(tr->function_no_pids);
   7555	trace_filter_add_remove_task(pid_list, self, task);
   7556}
   7557
   7558static void
   7559ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
   7560{
   7561	struct trace_pid_list *pid_list;
   7562	struct trace_array *tr = data;
   7563
   7564	pid_list = rcu_dereference_sched(tr->function_pids);
   7565	trace_filter_add_remove_task(pid_list, NULL, task);
   7566
   7567	pid_list = rcu_dereference_sched(tr->function_no_pids);
   7568	trace_filter_add_remove_task(pid_list, NULL, task);
   7569}
   7570
   7571void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
   7572{
   7573	if (enable) {
   7574		register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
   7575						  tr);
   7576		register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
   7577						  tr);
   7578	} else {
   7579		unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
   7580						    tr);
   7581		unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
   7582						    tr);
   7583	}
   7584}
   7585
   7586static void clear_ftrace_pids(struct trace_array *tr, int type)
   7587{
   7588	struct trace_pid_list *pid_list;
   7589	struct trace_pid_list *no_pid_list;
   7590	int cpu;
   7591
   7592	pid_list = rcu_dereference_protected(tr->function_pids,
   7593					     lockdep_is_held(&ftrace_lock));
   7594	no_pid_list = rcu_dereference_protected(tr->function_no_pids,
   7595						lockdep_is_held(&ftrace_lock));
   7596
   7597	/* Make sure there's something to do */
   7598	if (!pid_type_enabled(type, pid_list, no_pid_list))
   7599		return;
   7600
   7601	/* See if the pids still need to be checked after this */
   7602	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
   7603		unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
   7604		for_each_possible_cpu(cpu)
   7605			per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
   7606	}
   7607
   7608	if (type & TRACE_PIDS)
   7609		rcu_assign_pointer(tr->function_pids, NULL);
   7610
   7611	if (type & TRACE_NO_PIDS)
   7612		rcu_assign_pointer(tr->function_no_pids, NULL);
   7613
   7614	/* Wait till all users are no longer using pid filtering */
   7615	synchronize_rcu();
   7616
   7617	if ((type & TRACE_PIDS) && pid_list)
   7618		trace_pid_list_free(pid_list);
   7619
   7620	if ((type & TRACE_NO_PIDS) && no_pid_list)
   7621		trace_pid_list_free(no_pid_list);
   7622}
   7623
   7624void ftrace_clear_pids(struct trace_array *tr)
   7625{
   7626	mutex_lock(&ftrace_lock);
   7627
   7628	clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
   7629
   7630	mutex_unlock(&ftrace_lock);
   7631}
   7632
   7633static void ftrace_pid_reset(struct trace_array *tr, int type)
   7634{
   7635	mutex_lock(&ftrace_lock);
   7636	clear_ftrace_pids(tr, type);
   7637
   7638	ftrace_update_pid_func();
   7639	ftrace_startup_all(0);
   7640
   7641	mutex_unlock(&ftrace_lock);
   7642}
   7643
   7644/* Greater than any max PID */
   7645#define FTRACE_NO_PIDS		(void *)(PID_MAX_LIMIT + 1)
   7646
   7647static void *fpid_start(struct seq_file *m, loff_t *pos)
   7648	__acquires(RCU)
   7649{
   7650	struct trace_pid_list *pid_list;
   7651	struct trace_array *tr = m->private;
   7652
   7653	mutex_lock(&ftrace_lock);
   7654	rcu_read_lock_sched();
   7655
   7656	pid_list = rcu_dereference_sched(tr->function_pids);
   7657
   7658	if (!pid_list)
   7659		return !(*pos) ? FTRACE_NO_PIDS : NULL;
   7660
   7661	return trace_pid_start(pid_list, pos);
   7662}
   7663
   7664static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
   7665{
   7666	struct trace_array *tr = m->private;
   7667	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
   7668
   7669	if (v == FTRACE_NO_PIDS) {
   7670		(*pos)++;
   7671		return NULL;
   7672	}
   7673	return trace_pid_next(pid_list, v, pos);
   7674}
   7675
   7676static void fpid_stop(struct seq_file *m, void *p)
   7677	__releases(RCU)
   7678{
   7679	rcu_read_unlock_sched();
   7680	mutex_unlock(&ftrace_lock);
   7681}
   7682
   7683static int fpid_show(struct seq_file *m, void *v)
   7684{
   7685	if (v == FTRACE_NO_PIDS) {
   7686		seq_puts(m, "no pid\n");
   7687		return 0;
   7688	}
   7689
   7690	return trace_pid_show(m, v);
   7691}
   7692
   7693static const struct seq_operations ftrace_pid_sops = {
   7694	.start = fpid_start,
   7695	.next = fpid_next,
   7696	.stop = fpid_stop,
   7697	.show = fpid_show,
   7698};
   7699
   7700static void *fnpid_start(struct seq_file *m, loff_t *pos)
   7701	__acquires(RCU)
   7702{
   7703	struct trace_pid_list *pid_list;
   7704	struct trace_array *tr = m->private;
   7705
   7706	mutex_lock(&ftrace_lock);
   7707	rcu_read_lock_sched();
   7708
   7709	pid_list = rcu_dereference_sched(tr->function_no_pids);
   7710
   7711	if (!pid_list)
   7712		return !(*pos) ? FTRACE_NO_PIDS : NULL;
   7713
   7714	return trace_pid_start(pid_list, pos);
   7715}
   7716
   7717static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
   7718{
   7719	struct trace_array *tr = m->private;
   7720	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
   7721
   7722	if (v == FTRACE_NO_PIDS) {
   7723		(*pos)++;
   7724		return NULL;
   7725	}
   7726	return trace_pid_next(pid_list, v, pos);
   7727}
   7728
   7729static const struct seq_operations ftrace_no_pid_sops = {
   7730	.start = fnpid_start,
   7731	.next = fnpid_next,
   7732	.stop = fpid_stop,
   7733	.show = fpid_show,
   7734};
   7735
   7736static int pid_open(struct inode *inode, struct file *file, int type)
   7737{
   7738	const struct seq_operations *seq_ops;
   7739	struct trace_array *tr = inode->i_private;
   7740	struct seq_file *m;
   7741	int ret = 0;
   7742
   7743	ret = tracing_check_open_get_tr(tr);
   7744	if (ret)
   7745		return ret;
   7746
   7747	if ((file->f_mode & FMODE_WRITE) &&
   7748	    (file->f_flags & O_TRUNC))
   7749		ftrace_pid_reset(tr, type);
   7750
   7751	switch (type) {
   7752	case TRACE_PIDS:
   7753		seq_ops = &ftrace_pid_sops;
   7754		break;
   7755	case TRACE_NO_PIDS:
   7756		seq_ops = &ftrace_no_pid_sops;
   7757		break;
   7758	default:
   7759		trace_array_put(tr);
   7760		WARN_ON_ONCE(1);
   7761		return -EINVAL;
   7762	}
   7763
   7764	ret = seq_open(file, seq_ops);
   7765	if (ret < 0) {
   7766		trace_array_put(tr);
   7767	} else {
   7768		m = file->private_data;
   7769		/* copy tr over to seq ops */
   7770		m->private = tr;
   7771	}
   7772
   7773	return ret;
   7774}
   7775
   7776static int
   7777ftrace_pid_open(struct inode *inode, struct file *file)
   7778{
   7779	return pid_open(inode, file, TRACE_PIDS);
   7780}
   7781
   7782static int
   7783ftrace_no_pid_open(struct inode *inode, struct file *file)
   7784{
   7785	return pid_open(inode, file, TRACE_NO_PIDS);
   7786}
   7787
   7788static void ignore_task_cpu(void *data)
   7789{
   7790	struct trace_array *tr = data;
   7791	struct trace_pid_list *pid_list;
   7792	struct trace_pid_list *no_pid_list;
   7793
    7794	/*
    7795	 * This function is called by on_each_cpu() while the
    7796	 * ftrace_lock is held.
    7797	 */
   7798	pid_list = rcu_dereference_protected(tr->function_pids,
   7799					     mutex_is_locked(&ftrace_lock));
   7800	no_pid_list = rcu_dereference_protected(tr->function_no_pids,
   7801						mutex_is_locked(&ftrace_lock));
   7802
   7803	if (trace_ignore_this_task(pid_list, no_pid_list, current))
   7804		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
   7805			       FTRACE_PID_IGNORE);
   7806	else
   7807		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
   7808			       current->pid);
   7809}
   7810
   7811static ssize_t
   7812pid_write(struct file *filp, const char __user *ubuf,
   7813	  size_t cnt, loff_t *ppos, int type)
   7814{
   7815	struct seq_file *m = filp->private_data;
   7816	struct trace_array *tr = m->private;
   7817	struct trace_pid_list *filtered_pids;
   7818	struct trace_pid_list *other_pids;
   7819	struct trace_pid_list *pid_list;
   7820	ssize_t ret;
   7821
   7822	if (!cnt)
   7823		return 0;
   7824
   7825	mutex_lock(&ftrace_lock);
   7826
   7827	switch (type) {
   7828	case TRACE_PIDS:
   7829		filtered_pids = rcu_dereference_protected(tr->function_pids,
   7830					     lockdep_is_held(&ftrace_lock));
   7831		other_pids = rcu_dereference_protected(tr->function_no_pids,
   7832					     lockdep_is_held(&ftrace_lock));
   7833		break;
   7834	case TRACE_NO_PIDS:
   7835		filtered_pids = rcu_dereference_protected(tr->function_no_pids,
   7836					     lockdep_is_held(&ftrace_lock));
   7837		other_pids = rcu_dereference_protected(tr->function_pids,
   7838					     lockdep_is_held(&ftrace_lock));
   7839		break;
   7840	default:
   7841		ret = -EINVAL;
   7842		WARN_ON_ONCE(1);
   7843		goto out;
   7844	}
   7845
   7846	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
   7847	if (ret < 0)
   7848		goto out;
   7849
   7850	switch (type) {
   7851	case TRACE_PIDS:
   7852		rcu_assign_pointer(tr->function_pids, pid_list);
   7853		break;
   7854	case TRACE_NO_PIDS:
   7855		rcu_assign_pointer(tr->function_no_pids, pid_list);
   7856		break;
   7857	}
    7858
   7860	if (filtered_pids) {
   7861		synchronize_rcu();
   7862		trace_pid_list_free(filtered_pids);
   7863	} else if (pid_list && !other_pids) {
   7864		/* Register a probe to set whether to ignore the tracing of a task */
   7865		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
   7866	}
   7867
   7868	/*
   7869	 * Ignoring of pids is done at task switch. But we have to
   7870	 * check for those tasks that are currently running.
   7871	 * Always do this in case a pid was appended or removed.
   7872	 */
   7873	on_each_cpu(ignore_task_cpu, tr, 1);
   7874
   7875	ftrace_update_pid_func();
   7876	ftrace_startup_all(0);
   7877 out:
   7878	mutex_unlock(&ftrace_lock);
   7879
   7880	if (ret > 0)
   7881		*ppos += ret;
   7882
   7883	return ret;
   7884}
   7885
   7886static ssize_t
   7887ftrace_pid_write(struct file *filp, const char __user *ubuf,
   7888		 size_t cnt, loff_t *ppos)
   7889{
   7890	return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
   7891}
   7892
   7893static ssize_t
   7894ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
   7895		    size_t cnt, loff_t *ppos)
   7896{
   7897	return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
   7898}
   7899
   7900static int
   7901ftrace_pid_release(struct inode *inode, struct file *file)
   7902{
   7903	struct trace_array *tr = inode->i_private;
   7904
   7905	trace_array_put(tr);
   7906
   7907	return seq_release(inode, file);
   7908}
   7909
   7910static const struct file_operations ftrace_pid_fops = {
   7911	.open		= ftrace_pid_open,
   7912	.write		= ftrace_pid_write,
   7913	.read		= seq_read,
   7914	.llseek		= tracing_lseek,
   7915	.release	= ftrace_pid_release,
   7916};
   7917
   7918static const struct file_operations ftrace_no_pid_fops = {
   7919	.open		= ftrace_no_pid_open,
   7920	.write		= ftrace_no_pid_write,
   7921	.read		= seq_read,
   7922	.llseek		= tracing_lseek,
   7923	.release	= ftrace_pid_release,
   7924};
   7925
   7926void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
   7927{
   7928	trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
   7929			    tr, &ftrace_pid_fops);
   7930	trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
   7931			  d_tracer, tr, &ftrace_no_pid_fops);
   7932}
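/*
 * Editor's note on usage (assuming the usual tracefs mount point,
 * typically /sys/kernel/tracing):
 *
 *	echo 1234 > /sys/kernel/tracing/set_ftrace_pid         # trace only pid 1234
 *	echo 1234 > /sys/kernel/tracing/set_ftrace_notrace_pid # trace all but 1234
 *
 * Writing with truncation (plain '>') resets the list first, as handled
 * by the O_TRUNC check in pid_open() above; '>>' appends to it.
 */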
   7933
   7934void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
   7935					 struct dentry *d_tracer)
   7936{
   7937	/* Only the top level directory has the dyn_tracefs and profile */
   7938	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
   7939
   7940	ftrace_init_dyn_tracefs(d_tracer);
   7941	ftrace_profile_tracefs(d_tracer);
   7942}
   7943
    7944/**
    7945 * ftrace_kill - kill ftrace
    7946 *
    7947 * This function should be used by panic code. It stops ftrace
    7948 * but in a not so nice way: it simply marks ftrace as disabled
    7949 * and points the trace function at the stub, with no teardown.
    7950 */
   7951void ftrace_kill(void)
   7952{
   7953	ftrace_disabled = 1;
   7954	ftrace_enabled = 0;
   7955	ftrace_trace_function = ftrace_stub;
   7956}
   7957
   7958/**
   7959 * ftrace_is_dead - Test if ftrace is dead or not.
   7960 *
   7961 * Returns 1 if ftrace is "dead", zero otherwise.
   7962 */
   7963int ftrace_is_dead(void)
   7964{
   7965	return ftrace_disabled;
   7966}
   7967
   7968/**
   7969 * register_ftrace_function - register a function for profiling
   7970 * @ops:	ops structure that holds the function for profiling.
   7971 *
   7972 * Register a function to be called by all functions in the
   7973 * kernel.
   7974 *
    7975 * Note: @ops->func and all the functions it calls must be labeled
    7976 *       with "notrace", otherwise it will go into a recursive
    7977 *       loop.
   7978 */
   7979int register_ftrace_function(struct ftrace_ops *ops)
   7980{
   7981	int ret;
   7982
   7983	ftrace_ops_init(ops);
   7984
   7985	mutex_lock(&ftrace_lock);
   7986
   7987	ret = ftrace_startup(ops, 0);
   7988
   7989	mutex_unlock(&ftrace_lock);
   7990
   7991	return ret;
   7992}
   7993EXPORT_SYMBOL_GPL(register_ftrace_function);
   7994
   7995/**
   7996 * unregister_ftrace_function - unregister a function for profiling.
   7997 * @ops:	ops structure that holds the function to unregister
   7998 *
   7999 * Unregister a function that was added to be called by ftrace profiling.
   8000 */
   8001int unregister_ftrace_function(struct ftrace_ops *ops)
   8002{
   8003	int ret;
   8004
   8005	mutex_lock(&ftrace_lock);
   8006	ret = ftrace_shutdown(ops, 0);
   8007	mutex_unlock(&ftrace_lock);
   8008
   8009	return ret;
   8010}
   8011EXPORT_SYMBOL_GPL(unregister_ftrace_function);
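/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * minimal module-style usage of the two calls above. my_trace_func,
 * my_ops, my_init/my_exit and the "schedule" filter are hypothetical;
 * the ftrace_set_filter() call mirrors the example in
 * Documentation/trace/ftrace-uses.rst. Note the callback is notrace,
 * per the comment on register_ftrace_function().
 */
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op,
				  struct ftrace_regs *fregs)
{
	/* runs on entry to every function that passes the filter */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
	.flags	= FTRACE_OPS_FL_RECURSION,	/* let ftrace guard recursion */
};

static int __init my_init(void)
{
	int ret;

	/* optional: trace only schedule() instead of every function */
	ret = ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
	if (ret)
		return ret;

	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}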
   8012
   8013static int symbols_cmp(const void *a, const void *b)
   8014{
   8015	const char **str_a = (const char **) a;
   8016	const char **str_b = (const char **) b;
   8017
   8018	return strcmp(*str_a, *str_b);
   8019}
   8020
   8021struct kallsyms_data {
   8022	unsigned long *addrs;
   8023	const char **syms;
   8024	size_t cnt;
   8025	size_t found;
   8026};
   8027
   8028static int kallsyms_callback(void *data, const char *name,
   8029			     struct module *mod, unsigned long addr)
   8030{
   8031	struct kallsyms_data *args = data;
   8032	const char **sym;
   8033	int idx;
   8034
   8035	sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
   8036	if (!sym)
   8037		return 0;
   8038
   8039	idx = sym - args->syms;
   8040	if (args->addrs[idx])
   8041		return 0;
   8042
   8043	addr = ftrace_location(addr);
   8044	if (!addr)
   8045		return 0;
   8046
   8047	args->addrs[idx] = addr;
   8048	args->found++;
   8049	return args->found == args->cnt ? 1 : 0;
   8050}
   8051
    8052/**
    8053 * ftrace_lookup_symbols - Lookup addresses for array of symbols
    8054 *
    8055 * @sorted_syms: array of symbol pointers to resolve,
    8056 * must be alphabetically sorted
    8057 * @cnt: number of symbols/addresses in the @sorted_syms/@addrs arrays
    8058 * @addrs: array for storing resulting addresses
    8059 *
    8060 * This function looks up addresses for the array of symbols provided
    8061 * in @sorted_syms (which must be alphabetically sorted) and stores
    8062 * them in the @addrs array, which needs to be big enough to store at
    8063 * least @cnt addresses.
    8064 *
    8065 * This function returns 0 if all provided symbols are found,
    8066 * -ESRCH otherwise.
    8067 */
   8068int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
   8069{
   8070	struct kallsyms_data args;
   8071	int err;
   8072
   8073	memset(addrs, 0, sizeof(*addrs) * cnt);
   8074	args.addrs = addrs;
   8075	args.syms = sorted_syms;
   8076	args.cnt = cnt;
   8077	args.found = 0;
   8078	err = kallsyms_on_each_symbol(kallsyms_callback, &args);
   8079	if (err < 0)
   8080		return err;
   8081	return args.found == args.cnt ? 0 : -ESRCH;
   8082}
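/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): resolving a couple of symbols. The array must already be
 * sorted alphabetically, since kallsyms_callback() bsearch()es it.
 * The symbol names and function name are hypothetical.
 */
static int __maybe_unused my_resolve_example(void)
{
	static const char *syms[] = { "schedule", "vfs_read" };	/* sorted */
	unsigned long addrs[ARRAY_SIZE(syms)];
	int err;

	err = ftrace_lookup_symbols(syms, ARRAY_SIZE(syms), addrs);
	if (err)
		return err;	/* -ESRCH: at least one symbol not found */

	pr_info("schedule at %lx, vfs_read at %lx\n", addrs[0], addrs[1]);
	return 0;
}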
   8083
   8084#ifdef CONFIG_SYSCTL
   8085
   8086#ifdef CONFIG_DYNAMIC_FTRACE
   8087static void ftrace_startup_sysctl(void)
   8088{
   8089	int command;
   8090
   8091	if (unlikely(ftrace_disabled))
   8092		return;
   8093
   8094	/* Force update next time */
   8095	saved_ftrace_func = NULL;
   8096	/* ftrace_start_up is true if we want ftrace running */
   8097	if (ftrace_start_up) {
   8098		command = FTRACE_UPDATE_CALLS;
   8099		if (ftrace_graph_active)
   8100			command |= FTRACE_START_FUNC_RET;
   8101		ftrace_startup_enable(command);
   8102	}
   8103}
   8104
   8105static void ftrace_shutdown_sysctl(void)
   8106{
   8107	int command;
   8108
   8109	if (unlikely(ftrace_disabled))
   8110		return;
   8111
   8112	/* ftrace_start_up is true if ftrace is running */
   8113	if (ftrace_start_up) {
   8114		command = FTRACE_DISABLE_CALLS;
   8115		if (ftrace_graph_active)
   8116			command |= FTRACE_STOP_FUNC_RET;
   8117		ftrace_run_update_code(command);
   8118	}
   8119}
   8120#else
   8121# define ftrace_startup_sysctl()       do { } while (0)
   8122# define ftrace_shutdown_sysctl()      do { } while (0)
   8123#endif /* CONFIG_DYNAMIC_FTRACE */
   8124
   8125static bool is_permanent_ops_registered(void)
   8126{
   8127	struct ftrace_ops *op;
   8128
   8129	do_for_each_ftrace_op(op, ftrace_ops_list) {
   8130		if (op->flags & FTRACE_OPS_FL_PERMANENT)
   8131			return true;
   8132	} while_for_each_ftrace_op(op);
   8133
   8134	return false;
   8135}
   8136
   8137static int
   8138ftrace_enable_sysctl(struct ctl_table *table, int write,
   8139		     void *buffer, size_t *lenp, loff_t *ppos)
   8140{
   8141	int ret = -ENODEV;
   8142
   8143	mutex_lock(&ftrace_lock);
   8144
   8145	if (unlikely(ftrace_disabled))
   8146		goto out;
   8147
   8148	ret = proc_dointvec(table, write, buffer, lenp, ppos);
   8149
   8150	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
   8151		goto out;
   8152
   8153	if (ftrace_enabled) {
   8154
   8155		/* we are starting ftrace again */
   8156		if (rcu_dereference_protected(ftrace_ops_list,
   8157			lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
   8158			update_ftrace_function();
   8159
   8160		ftrace_startup_sysctl();
   8161
   8162	} else {
   8163		if (is_permanent_ops_registered()) {
   8164			ftrace_enabled = true;
   8165			ret = -EBUSY;
   8166			goto out;
   8167		}
   8168
   8169		/* stopping ftrace calls (just send to ftrace_stub) */
   8170		ftrace_trace_function = ftrace_stub;
   8171
   8172		ftrace_shutdown_sysctl();
   8173	}
   8174
   8175	last_ftrace_enabled = !!ftrace_enabled;
   8176 out:
   8177	mutex_unlock(&ftrace_lock);
   8178	return ret;
   8179}
   8180
   8181static struct ctl_table ftrace_sysctls[] = {
   8182	{
   8183		.procname       = "ftrace_enabled",
   8184		.data           = &ftrace_enabled,
   8185		.maxlen         = sizeof(int),
   8186		.mode           = 0644,
   8187		.proc_handler   = ftrace_enable_sysctl,
   8188	},
   8189	{}
   8190};
   8191
   8192static int __init ftrace_sysctl_init(void)
   8193{
   8194	register_sysctl_init("kernel", ftrace_sysctls);
   8195	return 0;
   8196}
   8197late_initcall(ftrace_sysctl_init);
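/*
 * Editor's note: this registers /proc/sys/kernel/ftrace_enabled.
 * Example usage from user space:
 *
 *	sysctl kernel.ftrace_enabled=0
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *
 * Disabling fails with -EBUSY while an ops with FTRACE_OPS_FL_PERMANENT
 * is registered, as ftrace_enable_sysctl() above enforces.
 */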
   8198#endif