From 00619f7c650e4e46c650cb2e2fd5f438b32dc64b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 21 Sep 2021 21:54:32 +0200
Subject: sched,livepatch: Use task_call_func()

Instead of frobbing around with scheduler internals, use the shiny new
task_call_func() interface.

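For illustration, this is the calling convention task_call_func() expects
(a minimal sketch only; the callback and variable names below are made up
for this description and are not part of the patch):

        static int read_task_state(struct task_struct *task, void *arg)
        {
                /*
                 * Runs with @task's state pinned: the task cannot start
                 * or stop running while this callback executes.
                 */
                unsigned int *state = arg;

                *state = READ_ONCE(task->__state);
                return 0;
        }

        ...
        unsigned int state;
        int ret;

        ret = task_call_func(task, read_task_state, &state);

The callback's return value is passed straight back to the caller, which
is what lets klp_try_switch_task() below map each failure mode to its own
error code.
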
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Petr Mladek
Acked-by: Miroslav Benes
Acked-by: Vasily Gorbik
Tested-by: Petr Mladek
Tested-by: Vasily Gorbik # on s390
Link: https://lkml.kernel.org/r/20210929152428.709906138@infradead.org
---
 kernel/livepatch/transition.c | 90 +++++++++++++++++++++----------------------
 1 file changed, 44 insertions(+), 46 deletions(-)
(limited to 'kernel/livepatch')

diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 291b857a6e20..75251e9dbc3c 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -13,7 +13,6 @@
 #include "core.h"
 #include "patch.h"
 #include "transition.h"
-#include "../sched/sched.h"
 
 #define MAX_STACK_ENTRIES 100
 #define STACK_ERR_BUF_SIZE 128
@@ -240,7 +239,7 @@ static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
  * Determine whether it's safe to transition the task to the target patch state
  * by looking for any to-be-patched or to-be-unpatched functions on its stack.
  */
-static int klp_check_stack(struct task_struct *task, char *err_buf)
+static int klp_check_stack(struct task_struct *task, const char **oldname)
 {
        static unsigned long entries[MAX_STACK_ENTRIES];
        struct klp_object *obj;
@@ -248,12 +247,8 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
        int ret, nr_entries;
 
        ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
-       if (ret < 0) {
-               snprintf(err_buf, STACK_ERR_BUF_SIZE,
-                        "%s: %s:%d has an unreliable stack\n",
-                        __func__, task->comm, task->pid);
-               return ret;
-       }
+       if (ret < 0)
+               return -EINVAL;
        nr_entries = ret;
 
        klp_for_each_object(klp_transition_patch, obj) {
@@ -262,11 +257,8 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
                klp_for_each_func(obj, func) {
                        ret = klp_check_stack_func(func, entries, nr_entries);
                        if (ret) {
-                               snprintf(err_buf, STACK_ERR_BUF_SIZE,
-                                        "%s: %s:%d is sleeping on function %s\n",
-                                        __func__, task->comm, task->pid,
-                                        func->old_name);
-                               return ret;
+                               *oldname = func->old_name;
+                               return -EADDRINUSE;
                        }
                }
        }
@@ -274,6 +266,22 @@
        return 0;
 }
 
+static int klp_check_and_switch_task(struct task_struct *task, void *arg)
+{
+       int ret;
+
+       if (task_curr(task) && task != current)
+               return -EBUSY;
+
+       ret = klp_check_stack(task, arg);
+       if (ret)
+               return ret;
+
+       clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
+       task->patch_state = klp_target_state;
+       return 0;
+}
+
 /*
  * Try to safely switch a task to the target patch state. If it's currently
  * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
  */
@@ -281,13 +289,8 @@ static bool klp_try_switch_task(struct task_struct *task)
 {
-       static char err_buf[STACK_ERR_BUF_SIZE];
-       struct rq *rq;
-       struct rq_flags flags;
+       const char *old_name;
        int ret;
-       bool success = false;
-
-       err_buf[0] = '\0';
 
        /* check if this task has already switched over */
        if (task->patch_state == klp_target_state)
@@ -305,36 +308,31 @@ static bool klp_try_switch_task(struct task_struct *task)
         * functions. If all goes well, switch the task to the target patch
         * state.
         */
-       rq = task_rq_lock(task, &flags);
+       ret = task_call_func(task, klp_check_and_switch_task, &old_name);
+       switch (ret) {
+       case 0:         /* success */
+               break;
 
-       if (task_running(rq, task) && task != current) {
-               snprintf(err_buf, STACK_ERR_BUF_SIZE,
-                        "%s: %s:%d is running\n", __func__, task->comm,
-                        task->pid);
-               goto done;
+       case -EBUSY:    /* klp_check_and_switch_task() */
+               pr_debug("%s: %s:%d is running\n",
+                        __func__, task->comm, task->pid);
+               break;
+       case -EINVAL:   /* klp_check_and_switch_task() */
+               pr_debug("%s: %s:%d has an unreliable stack\n",
+                        __func__, task->comm, task->pid);
+               break;
+       case -EADDRINUSE:       /* klp_check_and_switch_task() */
+               pr_debug("%s: %s:%d is sleeping on function %s\n",
+                        __func__, task->comm, task->pid, old_name);
+               break;
+
+       default:
+               pr_debug("%s: Unknown error code (%d) when trying to switch %s:%d\n",
+                        __func__, ret, task->comm, task->pid);
+               break;
        }
 
-       ret = klp_check_stack(task, err_buf);
-       if (ret)
-               goto done;
-
-       success = true;
-
-       clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
-       task->patch_state = klp_target_state;
-
-done:
-       task_rq_unlock(rq, task, &flags);
-
-       /*
-        * Due to console deadlock issues, pr_debug() can't be used while
-        * holding the task rq lock. Instead we have to use a temporary buffer
-        * and print the debug message after releasing the lock.
-        */
-       if (err_buf[0] != '\0')
-               pr_debug("%s", err_buf);
-
-       return success;
+       return !ret;
 }
 
 /*
--
cgit v1.2.3-71-gd317

From 5de62ea84abd732ded7c5569426fd71c0420f83e Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 21 Sep 2021 22:16:02 +0200
Subject: sched,livepatch: Use wake_up_if_idle()

Make sure to prod idle CPUs so they call klp_update_patch_state().

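For illustration, the resulting idiom in the transition code (a sketch of
the hunk below, not a new API; the stub added to idle.h is what makes the
call safe without an #ifdef):

        /*
         * An idle CPU never schedules, so on its own it would never pass
         * through klp_update_patch_state(); prod it.
         */
        if (!klp_try_switch_task(idle_task(cpu)))
                wake_up_if_idle(cpu);   /* compiles to a no-op on !CONFIG_SMP */
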
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Petr Mladek
Acked-by: Miroslav Benes
Acked-by: Vasily Gorbik
Tested-by: Petr Mladek
Tested-by: Vasily Gorbik # on s390
Link: https://lkml.kernel.org/r/20210929151723.162004989@infradead.org
---
 include/linux/sched/idle.h    | 4 ++++
 kernel/livepatch/transition.c | 5 ++++-
 2 files changed, 8 insertions(+), 1 deletion(-)
(limited to 'kernel/livepatch')

diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h
index 22873d276be6..d73d314d59c6 100644
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
@@ -11,7 +11,11 @@ enum cpu_idle_type {
        CPU_MAX_IDLE_TYPES
 };
 
+#ifdef CONFIG_SMP
 extern void wake_up_if_idle(int cpu);
+#else
+static inline void wake_up_if_idle(int cpu) { }
+#endif
 
 /*
  * Idle thread specific functions to determine the need_resched
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 75251e9dbc3c..5683ac0d2566 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -413,8 +413,11 @@ void klp_try_complete_transition(void)
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                if (cpu_online(cpu)) {
-                       if (!klp_try_switch_task(task))
+                       if (!klp_try_switch_task(task)) {
                                complete = false;
+                               /* Make idle task go through the main loop. */
+                               wake_up_if_idle(cpu);
+                       }
                } else if (task->patch_state != klp_target_state) {
                        /* offline idle tasks can be switched immediately */
                        clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
--
cgit v1.2.3-71-gd317

From ce5e48036c9e76a2a5bd4d9079eac273087a533a Mon Sep 17 00:00:00 2001
From: 王贇
Date: Wed, 27 Oct 2021 11:14:44 +0800
Subject: ftrace: disable preemption when recursion locked

As the documentation explains, ftrace_test_recursion_trylock() and
ftrace_test_recursion_unlock() are supposed to disable and enable
preemption properly. However, this work is currently left to the
callers outside of those functions, where it is easy to miss by
mistake.

And since the internal users of trace_test_and_set_recursion() and
trace_clear_recursion() also require preemption to be disabled, we
can just merge the logic.

This patch makes sure that preemption is disabled when
trace_test_and_set_recursion() returns bit >= 0, and that
trace_clear_recursion() re-enables preemption if it was previously
enabled.

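For illustration, this is the shape of a handler under the new contract
(a sketch distilled from the arch hunks below; the handler body is elided):

        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (bit < 0)
                return;

        /*
         * Preemption is already disabled here by the trylock above, so
         * the explicit preempt_disable_notrace()/preempt_enable_notrace()
         * pairs in the callers go away.
         */
        ...

        /* Re-enables preemption iff it was enabled before the trylock. */
        ftrace_test_recursion_unlock(bit);
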
Link: https://lkml.kernel.org/r/13bde807-779c-aa4c-0672-20515ae365ea@linux.alibaba.com
CC: Petr Mladek
Cc: Guo Ren
Cc: Ingo Molnar
Cc: "James E.J. Bottomley"
Cc: Helge Deller
Cc: Michael Ellerman
Cc: Benjamin Herrenschmidt
Cc: Paul Mackerras
Cc: Paul Walmsley
Cc: Palmer Dabbelt
Cc: Albert Ou
Cc: Thomas Gleixner
Cc: Borislav Petkov
Cc: "H. Peter Anvin"
Cc: Josh Poimboeuf
Cc: Jiri Kosina
Cc: Joe Lawrence
Cc: Masami Hiramatsu
Cc: Nicholas Piggin
Cc: Jisheng Zhang
CC: Steven Rostedt
CC: Miroslav Benes
Reported-by: Abaci
Suggested-by: Peter Zijlstra
Signed-off-by: Michael Wang
[ Removed extra line in comment - SDR ]
Signed-off-by: Steven Rostedt (VMware)
---
 arch/csky/kernel/probes/ftrace.c     |  2 --
 arch/parisc/kernel/ftrace.c          |  2 --
 arch/powerpc/kernel/kprobes-ftrace.c |  2 --
 arch/riscv/kernel/probes/ftrace.c    |  2 --
 arch/x86/kernel/kprobes/ftrace.c     |  2 --
 include/linux/trace_recursion.h      | 11 ++++++++++-
 kernel/livepatch/patch.c             | 12 ++++++------
 kernel/trace/ftrace.c                | 15 +++++----------
 kernel/trace/trace_functions.c       |  5 -----
 9 files changed, 21 insertions(+), 32 deletions(-)
(limited to 'kernel/livepatch')

diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c
index b388228abbf2..834cffcfbce3 100644
--- a/arch/csky/kernel/probes/ftrace.c
+++ b/arch/csky/kernel/probes/ftrace.c
@@ -17,7 +17,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                return;
 
        regs = ftrace_get_regs(fregs);
-       preempt_disable_notrace();
        p = get_kprobe((kprobe_opcode_t *)ip);
        if (!p) {
                p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE));
@@ -57,7 +56,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                __this_cpu_write(current_kprobe, NULL);
        }
 out:
-       preempt_enable_notrace();
        ftrace_test_recursion_unlock(bit);
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index 01581f715737..b14011d3c2f1 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -211,7 +211,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                return;
 
        regs = ftrace_get_regs(fregs);
-       preempt_disable_notrace();
        p = get_kprobe((kprobe_opcode_t *)ip);
        if (unlikely(!p) || kprobe_disabled(p))
                goto out;
@@ -240,7 +239,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
        }
        __this_cpu_write(current_kprobe, NULL);
 out:
-       preempt_enable_notrace();
        ftrace_test_recursion_unlock(bit);
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 7154d58338cc..072ebe7f290b 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -26,7 +26,6 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
                return;
 
        regs = ftrace_get_regs(fregs);
-       preempt_disable_notrace();
        p = get_kprobe((kprobe_opcode_t *)nip);
        if (unlikely(!p) || kprobe_disabled(p))
                goto out;
@@ -61,7 +60,6 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
                __this_cpu_write(current_kprobe, NULL);
        }
 out:
-       preempt_enable_notrace();
        ftrace_test_recursion_unlock(bit);
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/riscv/kernel/probes/ftrace.c b/arch/riscv/kernel/probes/ftrace.c
index aab85a82f419..7142ec42e889 100644
--- a/arch/riscv/kernel/probes/ftrace.c
+++ b/arch/riscv/kernel/probes/ftrace.c
@@ -15,7 +15,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
        if (bit < 0)
                return;
 
-       preempt_disable_notrace();
        p = get_kprobe((kprobe_opcode_t *)ip);
        if (unlikely(!p) || kprobe_disabled(p))
                goto out;
@@ -52,7 +51,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                __this_cpu_write(current_kprobe, NULL);
        }
 out:
-       preempt_enable_notrace();
        ftrace_test_recursion_unlock(bit);
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 596de2f6d3a5..dd2ec14adb77 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -25,7 +25,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
        if (bit < 0)
                return;
 
-       preempt_disable_notrace();
        p = get_kprobe((kprobe_opcode_t *)ip);
        if (unlikely(!p) || kprobe_disabled(p))
                goto out;
@@ -59,7 +58,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                __this_cpu_write(current_kprobe, NULL);
        }
 out:
-       preempt_enable_notrace();
        ftrace_test_recursion_unlock(bit);
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
index 24f284eb55a7..a13f23b04d73 100644
--- a/include/linux/trace_recursion.h
+++ b/include/linux/trace_recursion.h
@@ -155,6 +155,9 @@ extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
 # define do_ftrace_record_recursion(ip, pip)   do { } while (0)
 #endif
 
+/*
+ * Preemption is promised to be disabled when return bit >= 0.
+ */
 static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
                                                        int start, int max)
 {
@@ -189,14 +192,20 @@ static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsign
        current->trace_recursion = val;
        barrier();
 
+       preempt_disable_notrace();
+
        return bit + 1;
 }
 
+/*
+ * Preemption will be enabled (if it was previously enabled).
+ */
 static __always_inline void trace_clear_recursion(int bit)
 {
        if (!bit)
                return;
 
+       preempt_enable_notrace();
        barrier();
        bit--;
        trace_recursion_clear(bit);
@@ -209,7 +218,7 @@ static __always_inline void trace_clear_recursion(int bit)
  * tracing recursed in the same context (normal vs interrupt),
  *
  * Returns: -1 if a recursion happened.
- *           >= 0 if no recursion
+ *           >= 0 if no recursion.
  */
 static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
                                                         unsigned long parent_ip)
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index e8029aea67f1..fe316c021d73 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -49,14 +49,15 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 
        ops = container_of(fops, struct klp_ops, fops);
 
+       /*
+        * The ftrace_test_recursion_trylock() will disable preemption,
+        * which is required for the variant of synchronize_rcu() that is
+        * used to allow patching functions where RCU is not watching.
+        * See klp_synchronize_transition() for more details.
+        */
        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (WARN_ON_ONCE(bit < 0))
                return;
-       /*
-        * A variant of synchronize_rcu() is used to allow patching functions
-        * where RCU is not watching, see klp_synchronize_transition().
-        */
-       preempt_disable_notrace();
 
        func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
                                      stack_node);
@@ -120,7 +121,6 @@ static void notrace klp_ftrace_handler(unsigned long ip,
                klp_arch_set_pc(fregs, (unsigned long)func->new_func);
 
 unlock:
-       preempt_enable_notrace();
        ftrace_test_recursion_unlock(bit);
 }
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2057ad363772..b4ed1a301232 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -7198,16 +7198,15 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
        struct ftrace_ops *op;
        int bit;
 
+       /*
+        * The ftrace_test_and_set_recursion() will disable preemption,
+        * which is required since some of the ops may be dynamically
+        * allocated, they must be freed after a synchronize_rcu().
+        */
        bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
        if (bit < 0)
                return;
 
-       /*
-        * Some of the ops may be dynamically allocated,
-        * they must be freed after a synchronize_rcu().
-        */
-       preempt_disable_notrace();
-
        do_for_each_ftrace_op(op, ftrace_ops_list) {
                /* Stub functions don't need to be called nor tested */
                if (op->flags & FTRACE_OPS_FL_STUB)
@@ -7231,7 +7230,6 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                }
        } while_for_each_ftrace_op(op);
 out:
-       preempt_enable_notrace();
        trace_clear_recursion(bit);
 }
 
@@ -7279,12 +7277,9 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
        if (bit < 0)
                return;
 
-       preempt_disable_notrace();
-
        if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
                op->func(ip, parent_ip, op, fregs);
 
-       preempt_enable_notrace();
        trace_clear_recursion(bit);
 }
 NOKPROBE_SYMBOL(ftrace_ops_assist_func);
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 1f0e63f5d1f9..9f1bfbe105e8 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -186,7 +186,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
                return;
 
        trace_ctx = tracing_gen_ctx();
-       preempt_disable_notrace();
 
        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->array_buffer.data, cpu);
@@ -194,7 +193,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
                trace_function(tr, ip, parent_ip, trace_ctx);
 
        ftrace_test_recursion_unlock(bit);
-       preempt_enable_notrace();
 }
 
 #ifdef CONFIG_UNWINDER_ORC
@@ -298,8 +296,6 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
        if (bit < 0)
                return;
 
-       preempt_disable_notrace();
-
        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->array_buffer.data, cpu);
        if (atomic_read(&data->disabled))
@@ -324,7 +320,6 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
 
 out:
        ftrace_test_recursion_unlock(bit);
-       preempt_enable_notrace();
 }
 
 static void
--
cgit v1.2.3-71-gd317

From e368cd72880360ffe9b298349ae96286dd121499 Mon Sep 17 00:00:00 2001
From: David Vernet
Date: Tue, 21 Dec 2021 06:57:45 -0800
Subject: Documentation: livepatch: Add livepatch API page

The livepatch subsystem has several exported functions and objects with
kerneldoc comments. Though the livepatch documentation contains
handwritten descriptions of all of these exported functions, they are
currently not pulled into the docs build using the kernel-doc directive.

In order to allow readers of the documentation to see the full kerneldoc
comments in the generated documentation files, this change adds a new
Documentation/livepatch/api.rst page which contains kernel-doc directives
to link the kerneldoc comments directly in the documentation. With this,
all of the hand-written descriptions of the APIs now cross-reference the
kerneldoc comments on the new Livepatching APIs page, and running
./scripts/find-unused-docs.sh on kernel/livepatch no longer shows any
files as missing documentation.

Note that all of the handwritten API descriptions were left alone with
the exception of Documentation/livepatch/system-state.rst, which was
updated to allow the cross-referencing to work correctly. The file now
follows the cross-referencing formatting guidance specified in
Documentation/doc-guide/kernel-doc.rst. Furthermore, some comments
around klp_shadow_free_all() were updated to say <_, id> rather than
<*, id> to match the rest of the file, and to prevent the docs build
from emitting an "Inline emphasis start-string without end string"
error.

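For illustration, this is the kind of comment those directives pull in
(abbreviated from the klp_shadow_free_all() kerneldoc touched by the
shadow.c hunk below):

        /**
         * klp_shadow_free_all() - detach and free all <_, id> shadow variables
         * @id: data identifier
         * @dtor: custom callback that can be used to unregister the variable
         *        and/or free data that the shadow variable points to (optional)
         */

Because the function is exported, the ':export:' directive picks it up
without naming it explicitly.
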
Signed-off-by: David Vernet
Reviewed-by: Petr Mladek
Acked-by: Miroslav Benes
Signed-off-by: Petr Mladek
Link: https://lore.kernel.org/r/20211221145743.4098360-1-void@manifault.com
---
 Documentation/livepatch/api.rst          | 30 ++++++++++++++++++++++++++++++
 Documentation/livepatch/index.rst        |  1 +
 Documentation/livepatch/shadow-vars.rst  |  4 ++--
 Documentation/livepatch/system-state.rst |  4 ++--
 kernel/livepatch/shadow.c                |  6 +++---
 5 files changed, 38 insertions(+), 7 deletions(-)
 create mode 100644 Documentation/livepatch/api.rst
(limited to 'kernel/livepatch')

diff --git a/Documentation/livepatch/api.rst b/Documentation/livepatch/api.rst
new file mode 100644
index 000000000000..78944b63d74b
--- /dev/null
+++ b/Documentation/livepatch/api.rst
@@ -0,0 +1,30 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+Livepatching APIs
+=================
+
+Livepatch Enablement
+====================
+
+.. kernel-doc:: kernel/livepatch/core.c
+   :export:
+
+
+Shadow Variables
+================
+
+.. kernel-doc:: kernel/livepatch/shadow.c
+   :export:
+
+System State Changes
+====================
+
+.. kernel-doc:: kernel/livepatch/state.c
+   :export:
+
+Object Types
+============
+
+.. kernel-doc:: include/linux/livepatch.h
+   :identifiers: klp_patch klp_object klp_func klp_callbacks klp_state
diff --git a/Documentation/livepatch/index.rst b/Documentation/livepatch/index.rst
index 43cce5fad705..cebf1c71d4a5 100644
--- a/Documentation/livepatch/index.rst
+++ b/Documentation/livepatch/index.rst
@@ -14,6 +14,7 @@ Kernel Livepatching
    shadow-vars
    system-state
    reliable-stacktrace
+   api
 
 .. only:: subproject and html
diff --git a/Documentation/livepatch/shadow-vars.rst b/Documentation/livepatch/shadow-vars.rst
index 6a7d43a8787d..7a7098bfb5c8 100644
--- a/Documentation/livepatch/shadow-vars.rst
+++ b/Documentation/livepatch/shadow-vars.rst
@@ -82,8 +82,8 @@ to do actions that can be done only once when a new variable is allocated.
   - call destructor function if defined
   - free shadow variable
 
-* klp_shadow_free_all() - detach and free all <*, id> shadow variables
-  - find and remove any <*, id> references from global hashtable
+* klp_shadow_free_all() - detach and free all <_, id> shadow variables
+  - find and remove any <_, id> references from global hashtable
 
   - if found
diff --git a/Documentation/livepatch/system-state.rst b/Documentation/livepatch/system-state.rst
index c6d127c2d9aa..7a3935fd812b 100644
--- a/Documentation/livepatch/system-state.rst
+++ b/Documentation/livepatch/system-state.rst
@@ -52,12 +52,12 @@ struct klp_state:
 
 The state can be manipulated using two functions:
 
-  - *klp_get_state(patch, id)*
+  - klp_get_state()
 
     - Get struct klp_state associated with the given livepatch
      and state id.
 
-  - *klp_get_prev_state(id)*
+  - klp_get_prev_state()
 
     - Get struct klp_state associated with the given feature id and
      already installed livepatches.
diff --git a/kernel/livepatch/shadow.c b/kernel/livepatch/shadow.c
index e5c9fb295ba9..c2e724d97ddf 100644
--- a/kernel/livepatch/shadow.c
+++ b/kernel/livepatch/shadow.c
@@ -272,12 +272,12 @@ void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
 EXPORT_SYMBOL_GPL(klp_shadow_free);
 
 /**
- * klp_shadow_free_all() - detach and free all <*, id> shadow variables
+ * klp_shadow_free_all() - detach and free all <_, id> shadow variables
  * @id: data identifier
  * @dtor: custom callback that can be used to unregister the variable
  *        and/or free data that the shadow variable points to (optional)
  *
- * This function releases the memory for all <*, id> shadow variable
+ * This function releases the memory for all <_, id> shadow variable
  * instances, callers should stop referencing them accordingly.
  */
 void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
@@ -288,7 +288,7 @@ void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
 
        spin_lock_irqsave(&klp_shadow_lock, flags);
 
-       /* Delete all <*, id> from hash */
+       /* Delete all <_, id> from hash */
        hash_for_each(klp_shadow_hash, i, shadow, node) {
                if (klp_shadow_match(shadow, shadow->obj, id))
                        klp_shadow_free_struct(shadow, dtor);
--
cgit v1.2.3-71-gd317

From 5ef3dd20555e8e878ac390a71e658db5fd02845c Mon Sep 17 00:00:00 2001
From: David Vernet
Date: Tue, 21 Dec 2021 07:39:31 -0800
Subject: livepatch: Fix kobject refcount bug on klp_init_patch_early failure path

When enabling a klp patch with klp_enable_patch(), klp_init_patch_early()
is invoked to initialize the kobjects for the patch itself, as well as the
'struct klp_object' and 'struct klp_func' objects that comprise it.
However, there are some error paths in klp_enable_patch() where some
kobjects may have been initialized with kobject_init(), but an error code
is still returned due to e.g. a 'struct klp_object' having a NULL funcs
pointer.

In these paths, the initial reference of the kobject of the 'struct
klp_patch' may never be released, along with one or more of its objects
and their functions, as kobject_put() is not invoked on the cleanup path
if klp_init_patch_early() returns an error code.

For example, if an object entry such as the following were added to the
sample livepatch module's klp patch, it would cause the vmlinux
klp_object, and its klp_func which updates 'cmdline_proc_show', to never
be released:

        static struct klp_object objs[] = {
                {
                        /* name being NULL means vmlinux */
                        .funcs = funcs,
                },
                {
                        /* NULL funcs -- would cause reference leak */
                        .name = "kvm",
                }, { }
        };

Without this change, if CONFIG_DEBUG_KOBJECT is enabled, and the sample
klp patch is loaded, the kobjects (the patch, the vmlinux 'struct
klp_object', and its func) are observed as initialized, but never
released, in the dmesg log output. With the change, these kobject
references no longer fail to be released as the error case is properly
handled before they are initialized.

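In miniature, the leak being fixed looks like this (a sketch, not the
literal code):

        kobject_init(&patch->kobj, &klp_ktype_patch); /* takes initial ref */
        ...
        if (!obj->funcs)
                return -EINVAL; /* initial ref never kobject_put() -- leaked */

Moving the validation in front of any kobject_init() call, as done below,
means an error return can no longer strand an initialized kobject.
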
Signed-off-by: David Vernet
Reviewed-by: Petr Mladek
Acked-by: Miroslav Benes
Acked-by: Josh Poimboeuf
Signed-off-by: Petr Mladek
---
 kernel/livepatch/core.c | 31 +++++++++++++------------------
 1 file changed, 13 insertions(+), 18 deletions(-)
(limited to 'kernel/livepatch')

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 335d988bd811..7d228cdb44c5 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -862,14 +862,11 @@ static void klp_init_object_early(struct klp_patch *patch,
        list_add_tail(&obj->node, &patch->obj_list);
 }
 
-static int klp_init_patch_early(struct klp_patch *patch)
+static void klp_init_patch_early(struct klp_patch *patch)
 {
        struct klp_object *obj;
        struct klp_func *func;
 
-       if (!patch->objs)
-               return -EINVAL;
-
        INIT_LIST_HEAD(&patch->list);
        INIT_LIST_HEAD(&patch->obj_list);
        kobject_init(&patch->kobj, &klp_ktype_patch);
@@ -879,20 +876,12 @@ static int klp_init_patch_early(struct klp_patch *patch)
        init_completion(&patch->finish);
 
        klp_for_each_object_static(patch, obj) {
-               if (!obj->funcs)
-                       return -EINVAL;
-
                klp_init_object_early(patch, obj);
 
                klp_for_each_func_static(obj, func) {
                        klp_init_func_early(obj, func);
                }
        }
-
-       if (!try_module_get(patch->mod))
-               return -ENODEV;
-
-       return 0;
 }
 
 static int klp_init_patch(struct klp_patch *patch)
@@ -1024,10 +1013,17 @@ err:
 int klp_enable_patch(struct klp_patch *patch)
 {
        int ret;
+       struct klp_object *obj;
 
-       if (!patch || !patch->mod)
+       if (!patch || !patch->mod || !patch->objs)
                return -EINVAL;
 
+       klp_for_each_object_static(patch, obj) {
+               if (!obj->funcs)
+                       return -EINVAL;
+       }
+
+
        if (!is_livepatch_module(patch->mod)) {
                pr_err("module %s is not marked as a livepatch module\n",
                       patch->mod->name);
@@ -1051,11 +1047,10 @@ int klp_enable_patch(struct klp_patch *patch)
                return -EINVAL;
        }
 
-       ret = klp_init_patch_early(patch);
-       if (ret) {
-               mutex_unlock(&klp_mutex);
-               return ret;
-       }
+       if (!try_module_get(patch->mod))
+               return -ENODEV;
+
+       klp_init_patch_early(patch);
 
        ret = klp_init_patch(patch);
        if (ret)
--
cgit v1.2.3-71-gd317

From 50a0f3f55e382b313e7cbebdf8ccf1593296e16f Mon Sep 17 00:00:00 2001
From: Yang Yingliang
Date: Sat, 25 Dec 2021 10:51:15 +0800
Subject: livepatch: Fix missing unlock on error in klp_enable_patch()

Add missing unlock when try_module_get() fails in klp_enable_patch().

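The bug class, in miniature (a sketch, not the literal code):

        mutex_lock(&klp_mutex);
        ...
        if (!try_module_get(patch->mod))
                return -ENODEV; /* bug: returns with klp_mutex still held */

The hunk below adds the missing mutex_unlock() before the early return.
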
Fixes: 5ef3dd20555e8e8 ("livepatch: Fix kobject refcount bug on klp_init_patch_early failure path")
Reported-by: Hulk Robot
Signed-off-by: Yang Yingliang
Acked-by: David Vernet
Reviewed-by: Petr Mladek
Signed-off-by: Petr Mladek
Link: https://lore.kernel.org/r/20211225025115.475348-1-yangyingliang@huawei.com
---
 kernel/livepatch/core.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
(limited to 'kernel/livepatch')

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 7d228cdb44c5..585494ec464f 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -1047,8 +1047,10 @@ int klp_enable_patch(struct klp_patch *patch)
                return -EINVAL;
        }
 
-       if (!try_module_get(patch->mod))
+       if (!try_module_get(patch->mod)) {
+               mutex_unlock(&klp_mutex);
                return -ENODEV;
+       }
 
        klp_init_patch_early(patch);
 
--
cgit v1.2.3-71-gd317