cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

transition.c (18096B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

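/* Bound on the stack frames saved into klp_check_stack()'s static buffer. */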
#define MAX_STACK_ENTRIES  100
#define STACK_ERR_BUF_SIZE 128

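/*
 * Number of unsuccessful attempts to complete the transition after which
 * straggler tasks are nudged with a fake signal; attempts are retried
 * roughly once per second, see klp_try_complete_transition().
 */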
#define SIGNALS_TIMEOUT 15

struct klp_patch *klp_transition_patch;

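/*
 * KLP_PATCHED or KLP_UNPATCHED while a transition is in progress,
 * KLP_UNDEFINED otherwise.
 */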
static int klp_target_state = KLP_UNDEFINED;

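/* Number of unsuccessful attempts to complete the current transition. */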
static unsigned int klp_signals_cnt;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force
 * of synchronize_rcu(). This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching,
 * e.g. before user_exit(). We cannot rely on the RCU infrastructure
 * to do the synchronization. Instead, hard force the sched synchronization.
 *
 * This approach allows RCU functions to be used for manipulating func_stack
 * safely.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
		klp_unpatch_replaced_patches(klp_transition_patch);
		klp_discard_nops(klp_transition_patch);
	}

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

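	/* All tasks have settled; reset them for the next transition. */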
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
				unsigned int nr_entries)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < nr_entries; i++) {
		address = entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			 /*
			  * Check for the to-be-unpatched function
			  * (the func itself).
			  */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_func);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = (unsigned long)func->old_func;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, const char **oldname)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
	if (ret < 0)
		return -EINVAL;
	nr_entries = ret;

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				*oldname = func->old_name;
				return -EADDRINUSE;
			}
		}
	}

	return 0;
}

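/*
 * Called through task_call_func() so that the task's state stays stable
 * while its stack is examined; tasks that are currently running are
 * rejected with -EBUSY.
 */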
static int klp_check_and_switch_task(struct task_struct *task, void *arg)
{
	int ret;

	if (task_curr(task) && task != current)
		return -EBUSY;

	ret = klp_check_stack(task, arg);
	if (ret)
		return ret;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;
	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	const char *old_name;
	int ret;

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	ret = task_call_func(task, klp_check_and_switch_task, &old_name);
	switch (ret) {
	case 0:		/* success */
		break;

	case -EBUSY:	/* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d is running\n",
			 __func__, task->comm, task->pid);
		break;
	case -EINVAL:	/* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		break;
	case -EADDRINUSE: /* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d is sleeping on function %s\n",
			 __func__, task->comm, task->pid, old_name);
		break;

	default:
		pr_debug("%s: Unknown error code (%d) when trying to switch %s:%d\n",
			 __func__, ret, task->comm, task->pid);
		break;
	}

	return !ret;
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
	struct task_struct *g, *task;

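	/* The notice is printed only on the first round of fake signals. */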
	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here. We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless. It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptibly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send fake signal to all non-kthread tasks which are
			 * still not migrated.
			 */
			set_notify_signal(task);
		}
	}
	read_unlock(&tasklist_lock);
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	cpus_read_lock();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task)) {
				complete = false;
				/* Make idle task go through the main loop. */
				wake_up_if_idle(cpu);
			}
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	cpus_read_unlock();

	if (!complete) {
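		/* Fake signals are sent every SIGNALS_TIMEOUT unsuccessful attempts. */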
		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
			klp_send_signals();
		klp_signals_cnt++;

		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* We're done; now clean up the data structures. */
	patch = klp_transition_patch;
	klp_complete_transition();

	/*
	 * It would make more sense to free the unused patches in
	 * klp_complete_transition() but it is called also
	 * from klp_cancel_transition().
	 */
	if (!patch->enabled)
		klp_free_patch_async(patch);
	else if (patch->replace)
		klp_free_replaced_patches_async(patch);
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}

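	/* Restart the fake-signal pacing for this transition. */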
	klp_signals_cnt = 0;
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

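	/* Flip the enabled flag to reflect the reversed direction. */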
	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on the administrator's request. This
 * forces an existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'. This is not the case here and the consistency model could be
 * broken. The administrator, who is the only one who can invoke
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	/* Set forced flag for patches being removed. */
	if (klp_target_state == KLP_UNPATCHED)
		klp_transition_patch->forced = true;
	else if (klp_transition_patch->replace) {
		klp_for_each_patch(patch) {
			if (patch != klp_transition_patch)
				patch->forced = true;
		}
	}
}