cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

core.c (31115B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <linux/memory.h>
#include <linux/rcupdate.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "state.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data.  All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);

/*
 * Actively used patches: enabled or in transition. Note that replaced
 * or disabled patches are not listed even though the related kernel
 * module still can be loaded.
 */
LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	rcu_read_lock_sched();
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	rcu_read_unlock_sched();
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

static struct klp_func *klp_find_func(struct klp_object *obj,
				      struct klp_func *old_func)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
		    (old_func->old_sympos == func->old_sympos)) {
			return func;
		}
	}

	return NULL;
}

static struct klp_object *klp_find_object(struct klp_patch *patch,
					  struct klp_object *old_obj)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj) {
		if (klp_is_module(old_obj)) {
			if (klp_is_module(obj) &&
			    strcmp(old_obj->name, obj->name) == 0) {
				return obj;
			}
		} else if (!klp_is_module(obj)) {
			return obj;
		}
	}

	return NULL;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	if (objname)
		module_kallsyms_on_each_symbol(klp_find_callback, &args);
	else
		kallsyms_on_each_symbol(klp_find_callback, &args);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
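
/*
 * Sympos semantics, illustrated with made-up names: if a module "mymod"
 * contains two static functions that are both named "do_cleanup", then
 *
 *	klp_find_object_symbol("mymod", "do_cleanup", 2, &addr)
 *
 * resolves the second kallsyms occurrence, while passing sympos == 0
 * would fail because the symbol is not unique.
 */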

static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
			       unsigned int symndx, Elf_Shdr *relasec,
			       const char *sec_objname)
{
	int i, cnt, ret;
	char sym_objname[MODULE_NAME_LEN];
	char sym_name[KSYM_NAME_LEN];
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;
	bool sym_vmlinux;
	bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");

	/*
	 * Since the field widths for sym_objname and sym_name in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = (Elf_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.sym_objname.sym_name,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     sym_objname, sym_name, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		sym_vmlinux = !strcmp(sym_objname, "vmlinux");

		/*
		 * Prevent module-specific KLP rela sections from referencing
		 * vmlinux symbols.  This helps prevent ordering issues with
		 * module special section initializations.  Presumably such
		 * symbols are exported and normal relas can be used instead.
		 */
		if (!sec_vmlinux && sym_vmlinux) {
			pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
			       sym_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
					     sym_name, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}
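
/*
 * A symbol name handled by klp_resolve_symbols() might look like
 *
 *	.klp.sym.vmlinux.saved_command_line,0
 *
 * i.e. the symbol saved_command_line in vmlinux, with sympos 0 because
 * the symbol is expected to be unique.
 */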

/*
 * At a high level, there are two types of klp relocation sections: those which
 * reference symbols which live in vmlinux; and those which reference symbols
 * which live in other modules.  This function is called for both types:
 *
 * 1) When a klp module itself loads, the module code calls this function to
 *    write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections).
 *    These relocations are written to the klp module text to allow the patched
 *    code/data to reference unexported vmlinux symbols.  They're written as
 *    early as possible to ensure that other module init code (e.g.,
 *    jump_label_apply_nops) can access any unexported vmlinux symbols which
 *    might be referenced by the klp module's special sections.
 *
 * 2) When a to-be-patched module loads -- or is already loaded when a
 *    corresponding klp module loads -- klp code calls this function to write
 *    module-specific klp relocations (.klp.rela.{module}.* sections).  These
 *    are written to the klp module text to allow the patched code/data to
 *    reference symbols which live in the to-be-patched module or one of its
 *    module dependencies.  Exported symbols are supported, in addition to
 *    unexported symbols, in order to enable late module patching, which allows
 *    the to-be-patched module to be loaded and patched sometime *after* the
 *    klp module is loaded.
 */
int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
			     const char *shstrtab, const char *strtab,
			     unsigned int symndx, unsigned int secndx,
			     const char *objname)
{
	int cnt, ret;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec = sechdrs + secndx;

	/*
	 * Format: .klp.rela.sec_objname.section_name
	 * See comment in klp_resolve_symbols() for an explanation
	 * of the selected field width value.
	 */
	cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
		     sec_objname);
	if (cnt != 1) {
		pr_err("section %s has an incorrectly formatted name\n",
		       shstrtab + sec->sh_name);
		return -EINVAL;
	}

	if (strcmp(objname ? objname : "vmlinux", sec_objname))
		return 0;

	ret = klp_resolve_symbols(sechdrs, strtab, symndx, sec, sec_objname);
	if (ret)
		return ret;

	return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
}
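
/*
 * For example, a klp module patching ext4 might carry a relocation
 * section named something like
 *
 *	.klp.rela.ext4.text.unlikely
 *
 * (the example used in Documentation/livepatch/module-elf-format.rst),
 * whose relas are resolved and applied here once ext4 is, or becomes,
 * loaded.
 */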

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
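
/*
 * Typical usage from user space, a sketch based on the store/show
 * handlers below (the <patch> directory is named after the patch
 * module):
 *
 *	echo 0 > /sys/kernel/livepatch/<patch>/enabled    # disable the patch
 *	cat /sys/kernel/livepatch/<patch>/transition      # 1 while in transition
 *	echo 1 > /sys/kernel/livepatch/<patch>/force      # force a stuck transition
 */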
static int __klp_disable_patch(struct klp_patch *patch);

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow reversing a pending transition in either direction. It might
	 * be necessary to complete the transition without forcing and
	 * breaking the system integrity.
	 *
	 * Do not allow re-enabling a disabled patch.
	 */
	if (patch == klp_transition_patch)
		klp_reverse_transition();
	else if (!enabled)
		ret = __klp_disable_patch(patch);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&klp_mutex);

	if (ret)
		return ret;
	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n",
			patch == klp_transition_patch);
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_force_transition();

	mutex_unlock(&klp_mutex);

	return count;
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&force_kobj_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(klp_patch);

static void klp_free_object_dynamic(struct klp_object *obj)
{
	kfree(obj->name);
	kfree(obj);
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func);
static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj);

static struct klp_object *klp_alloc_object_dynamic(const char *name,
						   struct klp_patch *patch)
{
	struct klp_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (name) {
		obj->name = kstrdup(name, GFP_KERNEL);
		if (!obj->name) {
			kfree(obj);
			return NULL;
		}
	}

	klp_init_object_early(patch, obj);
	obj->dynamic = true;

	return obj;
}

static void klp_free_func_nop(struct klp_func *func)
{
	kfree(func->old_name);
	kfree(func);
}

static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
					   struct klp_object *obj)
{
	struct klp_func *func;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return NULL;

	if (old_func->old_name) {
		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
		if (!func->old_name) {
			kfree(func);
			return NULL;
		}
	}

	klp_init_func_early(obj, func);
	/*
	 * func->new_func is the same as func->old_func. These addresses are
	 * set when the object is loaded, see klp_init_object_loaded().
	 */
	func->old_sympos = old_func->old_sympos;
	func->nop = true;

	return func;
}

static int klp_add_object_nops(struct klp_patch *patch,
			       struct klp_object *old_obj)
{
	struct klp_object *obj;
	struct klp_func *func, *old_func;

	obj = klp_find_object(patch, old_obj);

	if (!obj) {
		obj = klp_alloc_object_dynamic(old_obj->name, patch);
		if (!obj)
			return -ENOMEM;
	}

	klp_for_each_func(old_obj, old_func) {
		func = klp_find_func(obj, old_func);
		if (func)
			continue;

		func = klp_alloc_func_nop(old_func, obj);
		if (!func)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Add 'nop' functions which simply return to the caller to run
 * the original function. The 'nop' functions are added to a
 * patch to facilitate a 'replace' mode.
 */
static int klp_add_nops(struct klp_patch *patch)
{
	struct klp_patch *old_patch;
	struct klp_object *old_obj;

	klp_for_each_patch(old_patch) {
		klp_for_each_object(old_patch, old_obj) {
			int err;

			err = klp_add_object_nops(patch, old_obj);
			if (err)
				return err;
		}
	}

	return 0;
}
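
/*
 * For example (function name chosen for illustration): if an already
 * installed patch modifies ext4_map_blocks() but a new patch with
 * .replace set does not, klp_add_nops() adds a nop entry for
 * ext4_map_blocks() to the new patch, so that the atomic replace
 * transition reverts that function to its original code.
 */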

static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = klp_patch_groups,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
	struct klp_object *obj;

	obj = container_of(kobj, struct klp_object, kobj);

	if (obj->dynamic)
		klp_free_object_dynamic(obj);
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
	struct klp_func *func;

	func = container_of(kobj, struct klp_func, kobj);

	if (func->nop)
		klp_free_func_nop(func);
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func, *tmp_func;

	klp_for_each_func_safe(obj, func, tmp_func) {
		if (nops_only && !func->nop)
			continue;

		list_del(&func->node);
		kobject_put(&func->kobj);
	}
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func) {
		func->old_func = NULL;

		if (func->nop)
			func->new_func = NULL;
	}
}

static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj, *tmp_obj;

	klp_for_each_object_safe(patch, obj, tmp_obj) {
		__klp_free_funcs(obj, nops_only);

		if (nops_only && !obj->dynamic)
			continue;

		list_del(&obj->node);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_objects(struct klp_patch *patch)
{
	__klp_free_objects(patch, false);
}

static void klp_free_objects_dynamic(struct klp_patch *patch)
{
	__klp_free_objects(patch, true);
}

/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * outside klp_mutex.
 */
static void klp_free_patch_start(struct klp_patch *patch)
{
	if (!list_empty(&patch->list))
		list_del(&patch->list);

	klp_free_objects(patch);
}

/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start(). And it has to be
 * the last function accessing the livepatch structures when the patch
 * gets disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
	/*
	 * Avoid deadlock with enabled_store() sysfs callback by
	 * calling this outside klp_mutex. It is safe because
	 * this is called when the patch gets disabled and it
	 * cannot get enabled again.
	 */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	/* Put the module after the last access to struct klp_patch. */
	if (!patch->forced)
		module_put(patch->mod);
}

/*
 * The livepatch might be freed from within the sysfs interface created by
 * the patch itself. This work item allows waiting until the interface is
 * destroyed in a separate context.
 */
static void klp_free_patch_work_fn(struct work_struct *work)
{
	struct klp_patch *patch =
		container_of(work, struct klp_patch, free_work);

	klp_free_patch_finish(patch);
}

void klp_free_patch_async(struct klp_patch *patch)
{
	klp_free_patch_start(patch);
	schedule_work(&patch->free_work);
}

void klp_free_replaced_patches_async(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch, *tmp_patch;

	klp_for_each_patch_safe(old_patch, tmp_patch) {
		if (old_patch == new_patch)
			return;
		klp_free_patch_async(old_patch);
	}
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name)
		return -EINVAL;

	/*
	 * NOPs get the address later. The patched module must be loaded,
	 * see klp_init_object_loaded().
	 */
	if (!func->new_func && !func->nop)
		return -EINVAL;

	if (strlen(func->old_name) >= KSYM_NAME_LEN)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/*
	 * The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
			   func->old_name,
			   func->old_sympos ? func->old_sympos : 1);
}
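
/*
 * E.g. a patched vmlinux function would show up as something like
 *
 *	/sys/kernel/livepatch/<patch>/vmlinux/meminfo_proc_show,1
 *
 * (illustrative function name; sympos 1 denotes the first, i.e. unique,
 * occurrence).
 */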

static int klp_apply_object_relocs(struct klp_patch *patch,
				   struct klp_object *obj)
{
	int i, ret;
	struct klp_modinfo *info = patch->mod->klp_info;

	for (i = 1; i < info->hdr.e_shnum; i++) {
		Elf_Shdr *sec = info->sechdrs + i;

		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		ret = klp_apply_section_relocs(patch->mod, info->sechdrs,
					       info->secstrings,
					       patch->mod->core_kallsyms.strtab,
					       info->symndx, i, obj->name);
		if (ret)
			return ret;
	}

	return 0;
}

/* Parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (klp_is_module(obj)) {
		/*
		 * Only write module-specific relocations here
		 * (.klp.rela.{module}.*).  vmlinux-specific relocations were
		 * written earlier during the initialization of the klp module
		 * itself.
		 */
		ret = klp_apply_object_relocs(patch, obj);
		if (ret)
			return ret;
	}

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     (unsigned long *)&func->old_func);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		if (func->nop)
			func->new_func = func->old_func;

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			return ret;
	}

	if (klp_is_object_loaded(obj))
		ret = klp_init_object_loaded(patch, obj);

	return ret;
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func)
{
	kobject_init(&func->kobj, &klp_ktype_func);
	list_add_tail(&func->node, &obj->func_list);
}

static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj)
{
	INIT_LIST_HEAD(&obj->func_list);
	kobject_init(&obj->kobj, &klp_ktype_object);
	list_add_tail(&obj->node, &patch->obj_list);
}

static void klp_init_patch_early(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	INIT_LIST_HEAD(&patch->list);
	INIT_LIST_HEAD(&patch->obj_list);
	kobject_init(&patch->kobj, &klp_ktype_patch);
	patch->enabled = false;
	patch->forced = false;
	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
	init_completion(&patch->finish);

	klp_for_each_object_static(patch, obj) {
		klp_init_object_early(patch, obj);

		klp_for_each_func_static(obj, func) {
			klp_init_func_early(obj, func);
		}
	}
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		return ret;

	if (patch->replace) {
		ret = klp_add_nops(patch);
		if (ret)
			return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			return ret;
	}

	list_add_tail(&patch->list, &klp_patches);

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition().  In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	patch->enabled = false;
	klp_try_complete_transition();

	return 0;
}

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	patch->enabled = true;
	klp_try_complete_transition();

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	klp_cancel_transition();
	return ret;
}

/**
 * klp_enable_patch() - enable the livepatch
 * @patch:	patch to be enabled
 *
 * Initializes the data structure associated with the patch, creates the sysfs
 * interface, performs the needed symbol lookups and code relocations,
 * registers the patched functions with ftrace.
 *
 * This function is supposed to be called from the livepatch module_init()
 * callback.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;
	struct klp_object *obj;

	if (!patch || !patch->mod || !patch->objs)
		return -EINVAL;

	klp_for_each_object_static(patch, obj) {
		if (!obj->funcs)
			return -EINVAL;
	}

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	if (!klp_have_reliable_stack()) {
		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
		pr_warn("The livepatch transition may never complete.\n");
	}

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_compatible(patch)) {
		pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
			patch->mod->name);
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	if (!try_module_get(patch->mod)) {
		mutex_unlock(&klp_mutex);
		return -ENODEV;
	}

	klp_init_patch_early(patch);

	ret = klp_init_patch(patch);
	if (ret)
		goto err;

	ret = __klp_enable_patch(patch);
	if (ret)
		goto err;

	mutex_unlock(&klp_mutex);

	return 0;

err:
	klp_free_patch_start(patch);

	mutex_unlock(&klp_mutex);

	klp_free_patch_finish(patch);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
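
/*
 * A minimal sketch of a caller, modeled on
 * samples/livepatch/livepatch-sample.c (the patched function and its
 * replacement are illustrative):
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *
 * Leaving objs[0].name NULL targets vmlinux, as klp_is_module() above
 * implies.
 */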

/*
 * This function unpatches objects from the replaced livepatches.
 *
 * We could be pretty aggressive here. It is called in a situation where
 * these structures are no longer accessed from the ftrace handler.
 * All functions are redirected by the klp_transition_patch. They
 * either use the new code or run the original code because of the
 * special nop function patches.
 *
 * The only exception is when the transition was forced. In this case,
 * klp_ftrace_handler() might still see the replaced patch on the stack.
 * Fortunately, it is carefully designed to work with removed functions
 * thanks to RCU. We only have to keep the patches on the system. This
 * is handled transparently: klp_free_patch_finish() skips module_put()
 * when the patch was forced.
 */
void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch;

	klp_for_each_patch(old_patch) {
		if (old_patch == new_patch)
			return;

		old_patch->enabled = false;
		klp_unpatch_objects(old_patch);
	}
}

/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We could be pretty aggressive. NOPs do not change the existing
 * behavior except for adding the unnecessary delay of going through
 * the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We could even free the NOP structures. They must be the last entry
 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
 * It does the same as klp_synchronize_transition() to make sure that
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
	klp_unpatch_objects_dynamic(klp_transition_patch);
	klp_free_objects_dynamic(klp_transition_patch);
}

/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	klp_for_each_patch(patch) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch != klp_transition_patch)
				klp_pre_unpatch_callback(obj);

			pr_notice("reverting patch '%s' on unloading module '%s'\n",
				  patch->mod->name, obj->mod->name);
			klp_unpatch_object(obj);

			klp_post_unpatch_callback(obj);

			klp_free_object_loaded(obj);
			break;
		}
	}
}

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	if (!strcmp(mod->name, "vmlinux")) {
		pr_err("vmlinux.ko: invalid module name\n");
		return -EINVAL;
	}

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	klp_for_each_patch(patch) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	obj->mod = NULL;
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);