cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

main.c (80879B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Copyright (C) 2002 Richard Henderson
      4 * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
      5 */
      6
      7#define INCLUDE_VERMAGIC
      8
      9#include <linux/export.h>
     10#include <linux/extable.h>
     11#include <linux/moduleloader.h>
     12#include <linux/module_signature.h>
     13#include <linux/trace_events.h>
     14#include <linux/init.h>
     15#include <linux/kallsyms.h>
     16#include <linux/buildid.h>
     17#include <linux/fs.h>
     18#include <linux/kernel.h>
     19#include <linux/kernel_read_file.h>
     20#include <linux/slab.h>
     21#include <linux/vmalloc.h>
     22#include <linux/elf.h>
     23#include <linux/seq_file.h>
     24#include <linux/syscalls.h>
     25#include <linux/fcntl.h>
     26#include <linux/rcupdate.h>
     27#include <linux/capability.h>
     28#include <linux/cpu.h>
     29#include <linux/moduleparam.h>
     30#include <linux/errno.h>
     31#include <linux/err.h>
     32#include <linux/vermagic.h>
     33#include <linux/notifier.h>
     34#include <linux/sched.h>
     35#include <linux/device.h>
     36#include <linux/string.h>
     37#include <linux/mutex.h>
     38#include <linux/rculist.h>
     39#include <linux/uaccess.h>
     40#include <asm/cacheflush.h>
     41#include <linux/set_memory.h>
     42#include <asm/mmu_context.h>
     43#include <linux/license.h>
     44#include <asm/sections.h>
     45#include <linux/tracepoint.h>
     46#include <linux/ftrace.h>
     47#include <linux/livepatch.h>
     48#include <linux/async.h>
     49#include <linux/percpu.h>
     50#include <linux/kmemleak.h>
     51#include <linux/jump_label.h>
     52#include <linux/pfn.h>
     53#include <linux/bsearch.h>
     54#include <linux/dynamic_debug.h>
     55#include <linux/audit.h>
     56#include <uapi/linux/module.h>
     57#include "internal.h"
     58
     59#define CREATE_TRACE_POINTS
     60#include <trace/events/module.h>
     61
     62/*
     63 * Mutex protects:
     64 * 1) List of modules (also safely readable with preempt_disable),
     65 * 2) module_use links,
     66 * 3) mod_tree.addr_min/mod_tree.addr_max.
     67 * (delete and add uses RCU list operations).
     68 */
     69DEFINE_MUTEX(module_mutex);
     70LIST_HEAD(modules);
     71
     72/* Work queue for freeing init sections in success case */
     73static void do_free_init(struct work_struct *w);
     74static DECLARE_WORK(init_free_wq, do_free_init);
     75static LLIST_HEAD(init_free_list);
     76
     77struct mod_tree_root mod_tree __cacheline_aligned = {
     78	.addr_min = -1UL,
     79};
     80
     81#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
     82struct mod_tree_root mod_data_tree __cacheline_aligned = {
     83	.addr_min = -1UL,
     84};
     85#endif
     86
     87#define module_addr_min mod_tree.addr_min
     88#define module_addr_max mod_tree.addr_max
     89
     90struct symsearch {
     91	const struct kernel_symbol *start, *stop;
     92	const s32 *crcs;
     93	enum mod_license license;
     94};
     95
     96/*
     97 * Bounds of module text, for speeding up __module_address.
     98 * Protected by module_mutex.
     99 */
    100static void __mod_update_bounds(void *base, unsigned int size, struct mod_tree_root *tree)
    101{
    102	unsigned long min = (unsigned long)base;
    103	unsigned long max = min + size;
    104
    105	if (min < tree->addr_min)
    106		tree->addr_min = min;
    107	if (max > tree->addr_max)
    108		tree->addr_max = max;
    109}
    110
    111static void mod_update_bounds(struct module *mod)
    112{
    113	__mod_update_bounds(mod->core_layout.base, mod->core_layout.size, &mod_tree);
    114	if (mod->init_layout.size)
    115		__mod_update_bounds(mod->init_layout.base, mod->init_layout.size, &mod_tree);
    116#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
    117	__mod_update_bounds(mod->data_layout.base, mod->data_layout.size, &mod_data_tree);
    118#endif
    119}
    120
    121/* Block module loading/unloading? */
    122int modules_disabled = 0;
    123core_param(nomodule, modules_disabled, bint, 0);
    124
    125/* Waiting for a module to finish initializing? */
    126static DECLARE_WAIT_QUEUE_HEAD(module_wq);
    127
    128static BLOCKING_NOTIFIER_HEAD(module_notify_list);
    129
    130int register_module_notifier(struct notifier_block *nb)
    131{
    132	return blocking_notifier_chain_register(&module_notify_list, nb);
    133}
    134EXPORT_SYMBOL(register_module_notifier);
    135
    136int unregister_module_notifier(struct notifier_block *nb)
    137{
    138	return blocking_notifier_chain_unregister(&module_notify_list, nb);
    139}
    140EXPORT_SYMBOL(unregister_module_notifier);
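
/*
 * Illustrative sketch of a notifier client (names hypothetical): the
 * callback receives one of the MODULE_STATE_* values in @action and
 * the struct module itself in @data.
 *
 *	static int example_module_cb(struct notifier_block *nb,
 *				     unsigned long action, void *data)
 *	{
 *		struct module *mod = data;
 *
 *		if (action == MODULE_STATE_COMING)
 *			pr_info("%s is coming\n", mod->name);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_module_cb,
 *	};
 *
 * followed by register_module_notifier(&example_nb) at init time and
 * unregister_module_notifier(&example_nb) on teardown.
 */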
    141
    142/*
    143 * We require a truly strong try_module_get(): 0 means success.
    144 * Otherwise an error is returned because initialization is ongoing
    145 * or has failed.
    146 */
    147static inline int strong_try_module_get(struct module *mod)
    148{
    149	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
    150	if (mod && mod->state == MODULE_STATE_COMING)
    151		return -EBUSY;
    152	if (try_module_get(mod))
    153		return 0;
    154	else
    155		return -ENOENT;
    156}
    157
    158static inline void add_taint_module(struct module *mod, unsigned flag,
    159				    enum lockdep_ok lockdep_ok)
    160{
    161	add_taint(flag, lockdep_ok);
    162	set_bit(flag, &mod->taints);
    163}
    164
    165/*
    166 * A thread that wants to hold a reference to a module only while it
    167 * is running can call this to safely exit.
    168 */
    169void __noreturn __module_put_and_kthread_exit(struct module *mod, long code)
    170{
    171	module_put(mod);
    172	kthread_exit(code);
    173}
    174EXPORT_SYMBOL(__module_put_and_kthread_exit);
    175
    176/* Find a module section: 0 means not found. */
    177static unsigned int find_sec(const struct load_info *info, const char *name)
    178{
    179	unsigned int i;
    180
    181	for (i = 1; i < info->hdr->e_shnum; i++) {
    182		Elf_Shdr *shdr = &info->sechdrs[i];
    183		/* Alloc bit cleared means "ignore it." */
    184		if ((shdr->sh_flags & SHF_ALLOC)
    185		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
    186			return i;
    187	}
    188	return 0;
    189}
    190
    191/* Find a module section, or NULL. */
    192static void *section_addr(const struct load_info *info, const char *name)
    193{
    194	/* Section 0 has sh_addr 0. */
    195	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
    196}
    197
    198/* Find a module section, or NULL.  Fill in number of "objects" in section. */
    199static void *section_objs(const struct load_info *info,
    200			  const char *name,
    201			  size_t object_size,
    202			  unsigned int *num)
    203{
    204	unsigned int sec = find_sec(info, name);
    205
    206	/* Section 0 has sh_addr 0 and sh_size 0. */
    207	*num = info->sechdrs[sec].sh_size / object_size;
    208	return (void *)info->sechdrs[sec].sh_addr;
    209}
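
/*
 * Sketch of a typical caller: pulling an array of fixed-size records
 * out of a named section, e.g. the module parameters kept in "__param":
 *
 *	mod->kp = section_objs(info, "__param",
 *			       sizeof(*mod->kp), &mod->num_kp);
 *
 * Since section 0 reports sh_addr == 0 and sh_size == 0, a missing
 * section falls out naturally as a NULL pointer with *num == 0.
 */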
    210
    211/* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */
    212static unsigned int find_any_sec(const struct load_info *info, const char *name)
    213{
    214	unsigned int i;
    215
    216	for (i = 1; i < info->hdr->e_shnum; i++) {
    217		Elf_Shdr *shdr = &info->sechdrs[i];
    218		if (strcmp(info->secstrings + shdr->sh_name, name) == 0)
    219			return i;
    220	}
    221	return 0;
    222}
    223
    224/*
    225 * Find a module section, or NULL. Fill in number of "objects" in section.
    226 * Ignores SHF_ALLOC flag.
    227 */
    228static __maybe_unused void *any_section_objs(const struct load_info *info,
    229					     const char *name,
    230					     size_t object_size,
    231					     unsigned int *num)
    232{
    233	unsigned int sec = find_any_sec(info, name);
    234
    235	/* Section 0 has sh_addr 0 and sh_size 0. */
    236	*num = info->sechdrs[sec].sh_size / object_size;
    237	return (void *)info->sechdrs[sec].sh_addr;
    238}
    239
    240#ifndef CONFIG_MODVERSIONS
    241#define symversion(base, idx) NULL
    242#else
    243#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
    244#endif
    245
    246static const char *kernel_symbol_name(const struct kernel_symbol *sym)
    247{
    248#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
    249	return offset_to_ptr(&sym->name_offset);
    250#else
    251	return sym->name;
    252#endif
    253}
    254
    255static const char *kernel_symbol_namespace(const struct kernel_symbol *sym)
    256{
    257#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
    258	if (!sym->namespace_offset)
    259		return NULL;
    260	return offset_to_ptr(&sym->namespace_offset);
    261#else
    262	return sym->namespace;
    263#endif
    264}
    265
    266int cmp_name(const void *name, const void *sym)
    267{
    268	return strcmp(name, kernel_symbol_name(sym));
    269}
    270
    271static bool find_exported_symbol_in_section(const struct symsearch *syms,
    272					    struct module *owner,
    273					    struct find_symbol_arg *fsa)
    274{
    275	struct kernel_symbol *sym;
    276
    277	if (!fsa->gplok && syms->license == GPL_ONLY)
    278		return false;
    279
    280	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
    281			sizeof(struct kernel_symbol), cmp_name);
    282	if (!sym)
    283		return false;
    284
    285	fsa->owner = owner;
    286	fsa->crc = symversion(syms->crcs, sym - syms->start);
    287	fsa->sym = sym;
    288	fsa->license = syms->license;
    289
    290	return true;
    291}
    292
    293/*
    294 * Find an exported symbol and return it, along with (optionally) its
    295 * crc and the owning module.  Needs preemption disabled or module_mutex.
    296 */
    297bool find_symbol(struct find_symbol_arg *fsa)
    298{
    299	static const struct symsearch arr[] = {
    300		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
    301		  NOT_GPL_ONLY },
    302		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
    303		  __start___kcrctab_gpl,
    304		  GPL_ONLY },
    305	};
    306	struct module *mod;
    307	unsigned int i;
    308
    309	module_assert_mutex_or_preempt();
    310
    311	for (i = 0; i < ARRAY_SIZE(arr); i++)
    312		if (find_exported_symbol_in_section(&arr[i], NULL, fsa))
    313			return true;
    314
    315	list_for_each_entry_rcu(mod, &modules, list,
    316				lockdep_is_held(&module_mutex)) {
    317		struct symsearch arr[] = {
    318			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
    319			  NOT_GPL_ONLY },
    320			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
    321			  mod->gpl_crcs,
    322			  GPL_ONLY },
    323		};
    324
    325		if (mod->state == MODULE_STATE_UNFORMED)
    326			continue;
    327
    328		for (i = 0; i < ARRAY_SIZE(arr); i++)
    329			if (find_exported_symbol_in_section(&arr[i], mod, fsa))
    330				return true;
    331	}
    332
    333	pr_debug("Failed to find symbol %s\n", fsa->name);
    334	return false;
    335}
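
/*
 * Minimal usage sketch (symbol name hypothetical), mirroring callers
 * such as __symbol_put() below:
 *
 *	struct find_symbol_arg fsa = {
 *		.name	= "some_exported_symbol",
 *		.gplok	= true,
 *	};
 *
 *	preempt_disable();
 *	if (find_symbol(&fsa))
 *		pr_debug("owned by %s\n", module_name(fsa.owner));
 *	preempt_enable();
 */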
    336
    337/*
    338 * Search for module by name: must hold module_mutex (or preempt disabled
    339 * for read-only access).
    340 */
    341struct module *find_module_all(const char *name, size_t len,
    342			       bool even_unformed)
    343{
    344	struct module *mod;
    345
    346	module_assert_mutex_or_preempt();
    347
    348	list_for_each_entry_rcu(mod, &modules, list,
    349				lockdep_is_held(&module_mutex)) {
    350		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
    351			continue;
    352		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
    353			return mod;
    354	}
    355	return NULL;
    356}
    357
    358struct module *find_module(const char *name)
    359{
    360	return find_module_all(name, strlen(name), false);
    361}
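
/*
 * Illustrative lookup (module name hypothetical); the result is only
 * stable while module_mutex is held or preemption stays disabled:
 *
 *	mutex_lock(&module_mutex);
 *	mod = find_module("example_mod");
 *	if (mod)
 *		pr_debug("%s is loaded\n", mod->name);
 *	mutex_unlock(&module_mutex);
 */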
    362
    363#ifdef CONFIG_SMP
    364
    365static inline void __percpu *mod_percpu(struct module *mod)
    366{
    367	return mod->percpu;
    368}
    369
    370static int percpu_modalloc(struct module *mod, struct load_info *info)
    371{
    372	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
    373	unsigned long align = pcpusec->sh_addralign;
    374
    375	if (!pcpusec->sh_size)
    376		return 0;
    377
    378	if (align > PAGE_SIZE) {
    379		pr_warn("%s: per-cpu alignment %li > %li\n",
    380			mod->name, align, PAGE_SIZE);
    381		align = PAGE_SIZE;
    382	}
    383
    384	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
    385	if (!mod->percpu) {
    386		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
    387			mod->name, (unsigned long)pcpusec->sh_size);
    388		return -ENOMEM;
    389	}
    390	mod->percpu_size = pcpusec->sh_size;
    391	return 0;
    392}
    393
    394static void percpu_modfree(struct module *mod)
    395{
    396	free_percpu(mod->percpu);
    397}
    398
    399static unsigned int find_pcpusec(struct load_info *info)
    400{
    401	return find_sec(info, ".data..percpu");
    402}
    403
    404static void percpu_modcopy(struct module *mod,
    405			   const void *from, unsigned long size)
    406{
    407	int cpu;
    408
    409	for_each_possible_cpu(cpu)
    410		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
    411}
    412
    413bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
    414{
    415	struct module *mod;
    416	unsigned int cpu;
    417
    418	preempt_disable();
    419
    420	list_for_each_entry_rcu(mod, &modules, list) {
    421		if (mod->state == MODULE_STATE_UNFORMED)
    422			continue;
    423		if (!mod->percpu_size)
    424			continue;
    425		for_each_possible_cpu(cpu) {
    426			void *start = per_cpu_ptr(mod->percpu, cpu);
    427			void *va = (void *)addr;
    428
    429			if (va >= start && va < start + mod->percpu_size) {
    430				if (can_addr) {
    431					*can_addr = (unsigned long) (va - start);
    432					*can_addr += (unsigned long)
    433						per_cpu_ptr(mod->percpu,
    434							    get_boot_cpu_id());
    435				}
    436				preempt_enable();
    437				return true;
    438			}
    439		}
    440	}
    441
    442	preempt_enable();
    443	return false;
    444}
    445
    446/**
    447 * is_module_percpu_address() - test whether address is from module static percpu
    448 * @addr: address to test
    449 *
    450 * Test whether @addr belongs to module static percpu area.
    451 *
    452 * Return: %true if @addr is from module static percpu area
    453 */
    454bool is_module_percpu_address(unsigned long addr)
    455{
    456	return __is_module_percpu_address(addr, NULL);
    457}
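
/*
 * Illustrative check (pointer hypothetical): distinguishing an address
 * inside a module's DEFINE_PER_CPU() area from ordinary memory.
 *
 *	if (is_module_percpu_address((unsigned long)ptr))
 *		; // ptr points into some module's static percpu region
 */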
    458
    459#else /* ... !CONFIG_SMP */
    460
    461static inline void __percpu *mod_percpu(struct module *mod)
    462{
    463	return NULL;
    464}
    465static int percpu_modalloc(struct module *mod, struct load_info *info)
    466{
    467	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
    468	if (info->sechdrs[info->index.pcpu].sh_size != 0)
    469		return -ENOMEM;
    470	return 0;
    471}
    472static inline void percpu_modfree(struct module *mod)
    473{
    474}
    475static unsigned int find_pcpusec(struct load_info *info)
    476{
    477	return 0;
    478}
    479static inline void percpu_modcopy(struct module *mod,
    480				  const void *from, unsigned long size)
    481{
    482	/* pcpusec should be 0, and size of that section should be 0. */
    483	BUG_ON(size != 0);
    484}
    485bool is_module_percpu_address(unsigned long addr)
    486{
    487	return false;
    488}
    489
    490bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
    491{
    492	return false;
    493}
    494
    495#endif /* CONFIG_SMP */
    496
    497#define MODINFO_ATTR(field)	\
    498static void setup_modinfo_##field(struct module *mod, const char *s)  \
    499{                                                                     \
    500	mod->field = kstrdup(s, GFP_KERNEL);                          \
    501}                                                                     \
    502static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
    503			struct module_kobject *mk, char *buffer)      \
    504{                                                                     \
    505	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
    506}                                                                     \
    507static int modinfo_##field##_exists(struct module *mod)               \
    508{                                                                     \
    509	return mod->field != NULL;                                    \
    510}                                                                     \
    511static void free_modinfo_##field(struct module *mod)                  \
    512{                                                                     \
    513	kfree(mod->field);                                            \
    514	mod->field = NULL;                                            \
    515}                                                                     \
    516static struct module_attribute modinfo_##field = {                    \
    517	.attr = { .name = __stringify(field), .mode = 0444 },         \
    518	.show = show_modinfo_##field,                                 \
    519	.setup = setup_modinfo_##field,                               \
    520	.test = modinfo_##field##_exists,                             \
    521	.free = free_modinfo_##field,                                 \
    522};
    523
    524MODINFO_ATTR(version);
    525MODINFO_ATTR(srcversion);
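
/*
 * Each MODINFO_ATTR(field) invocation above expands (roughly) to the
 * setup/show/test/free quartet plus a struct module_attribute named
 * modinfo_<field>; the modinfo_attrs[] table below collects them, and
 * sysfs exposes the values as e.g. /sys/module/<name>/version.
 */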
    526
    527static char last_unloaded_module[MODULE_NAME_LEN+1];
    528
    529#ifdef CONFIG_MODULE_UNLOAD
    530
    531EXPORT_TRACEPOINT_SYMBOL(module_get);
    532
    533/* MODULE_REF_BASE is the base reference count held by the module loader. */
    534#define MODULE_REF_BASE	1
    535
    536/* Init the unload section of the module. */
    537static int module_unload_init(struct module *mod)
    538{
    539	/*
    540	 * Initialize reference counter to MODULE_REF_BASE.
    541	 * refcnt == 0 means module is going.
    542	 */
    543	atomic_set(&mod->refcnt, MODULE_REF_BASE);
    544
    545	INIT_LIST_HEAD(&mod->source_list);
    546	INIT_LIST_HEAD(&mod->target_list);
    547
    548	/* Hold reference count during initialization. */
    549	atomic_inc(&mod->refcnt);
    550
    551	return 0;
    552}
    553
    554/* Does a already use b? */
    555static int already_uses(struct module *a, struct module *b)
    556{
    557	struct module_use *use;
    558
    559	list_for_each_entry(use, &b->source_list, source_list) {
    560		if (use->source == a) {
    561			pr_debug("%s uses %s!\n", a->name, b->name);
    562			return 1;
    563		}
    564	}
    565	pr_debug("%s does not use %s!\n", a->name, b->name);
    566	return 0;
    567}
    568
    569/*
    570 * Module a uses b
    571 *  - we add 'a' as a "source", 'b' as a "target" of module use
    572 *  - the module_use is added to the list of 'b' sources (so
    573 *    'b' can walk the list to see who sourced them), and of 'a'
    574 *    targets (so 'a' can see what modules it targets).
    575 */
    576static int add_module_usage(struct module *a, struct module *b)
    577{
    578	struct module_use *use;
    579
    580	pr_debug("Allocating new usage for %s.\n", a->name);
    581	use = kmalloc(sizeof(*use), GFP_ATOMIC);
    582	if (!use)
    583		return -ENOMEM;
    584
    585	use->source = a;
    586	use->target = b;
    587	list_add(&use->source_list, &b->source_list);
    588	list_add(&use->target_list, &a->target_list);
    589	return 0;
    590}
    591
    592/* Module a uses b: caller must hold module_mutex. */
    593static int ref_module(struct module *a, struct module *b)
    594{
    595	int err;
    596
    597	if (b == NULL || already_uses(a, b))
    598		return 0;
    599
    600	/* If module isn't available, we fail. */
    601	err = strong_try_module_get(b);
    602	if (err)
    603		return err;
    604
    605	err = add_module_usage(a, b);
    606	if (err) {
    607		module_put(b);
    608		return err;
    609	}
    610	return 0;
    611}
    612
    613/* Clear the unload stuff of the module. */
    614static void module_unload_free(struct module *mod)
    615{
    616	struct module_use *use, *tmp;
    617
    618	mutex_lock(&module_mutex);
    619	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
    620		struct module *i = use->target;
    621		pr_debug("%s unusing %s\n", mod->name, i->name);
    622		module_put(i);
    623		list_del(&use->source_list);
    624		list_del(&use->target_list);
    625		kfree(use);
    626	}
    627	mutex_unlock(&module_mutex);
    628}
    629
    630#ifdef CONFIG_MODULE_FORCE_UNLOAD
    631static inline int try_force_unload(unsigned int flags)
    632{
    633	int ret = (flags & O_TRUNC);
    634	if (ret)
    635		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
    636	return ret;
    637}
    638#else
    639static inline int try_force_unload(unsigned int flags)
    640{
    641	return 0;
    642}
    643#endif /* CONFIG_MODULE_FORCE_UNLOAD */
    644
    645/* Try to release refcount of module, 0 means success. */
    646static int try_release_module_ref(struct module *mod)
    647{
    648	int ret;
    649
    650	/* Try to decrement refcnt which we set at loading */
    651	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
    652	BUG_ON(ret < 0);
    653	if (ret)
    654		/* Others may still drop refs right now: restore the base ref unless it hit zero */
    655		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);
    656
    657	return ret;
    658}
    659
    660static int try_stop_module(struct module *mod, int flags, int *forced)
    661{
    662	/* If it's not unused, quit unless we're forcing. */
    663	if (try_release_module_ref(mod) != 0) {
    664		*forced = try_force_unload(flags);
    665		if (!(*forced))
    666			return -EWOULDBLOCK;
    667	}
    668
    669	/* Mark it as dying. */
    670	mod->state = MODULE_STATE_GOING;
    671
    672	return 0;
    673}
    674
    675/**
    676 * module_refcount() - return the refcount or -1 if unloading
    677 * @mod:	the module we're checking
    678 *
    679 * Return:
    680 *	-1 if the module is in the process of unloading
    681 *	otherwise the number of references in the kernel to the module
    682 */
    683int module_refcount(struct module *mod)
    684{
    685	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
    686}
    687EXPORT_SYMBOL(module_refcount);
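
/*
 * Reading the value (illustrative): with refcnt == MODULE_REF_BASE + n,
 * module_refcount() returns n, the number of outstanding gets; once
 * try_release_module_ref() strips the base reference during unload,
 * refcnt reaches 0 and this returns -1.
 */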
    688
    689/* This exists whether we can unload or not */
    690static void free_module(struct module *mod);
    691
    692SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
    693		unsigned int, flags)
    694{
    695	struct module *mod;
    696	char name[MODULE_NAME_LEN];
    697	int ret, forced = 0;
    698
    699	if (!capable(CAP_SYS_MODULE) || modules_disabled)
    700		return -EPERM;
    701
    702	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
    703		return -EFAULT;
    704	name[MODULE_NAME_LEN-1] = '\0';
    705
    706	audit_log_kern_module(name);
    707
    708	if (mutex_lock_interruptible(&module_mutex) != 0)
    709		return -EINTR;
    710
    711	mod = find_module(name);
    712	if (!mod) {
    713		ret = -ENOENT;
    714		goto out;
    715	}
    716
    717	if (!list_empty(&mod->source_list)) {
    718		/* Other modules depend on us: get rid of them first. */
    719		ret = -EWOULDBLOCK;
    720		goto out;
    721	}
    722
    723	/* Doing init or already dying? */
    724	if (mod->state != MODULE_STATE_LIVE) {
    725		/* FIXME: if (force), slam module count damn the torpedoes */
    726		pr_debug("%s already dying\n", mod->name);
    727		ret = -EBUSY;
    728		goto out;
    729	}
    730
    731	/* If it has an init func, it must have an exit func to unload */
    732	if (mod->init && !mod->exit) {
    733		forced = try_force_unload(flags);
    734		if (!forced) {
    735			/* This module can't be removed */
    736			ret = -EBUSY;
    737			goto out;
    738		}
    739	}
    740
    741	ret = try_stop_module(mod, flags, &forced);
    742	if (ret != 0)
    743		goto out;
    744
    745	mutex_unlock(&module_mutex);
    746	/* Final destruction, now that no one is using it. */
    747	if (mod->exit != NULL)
    748		mod->exit();
    749	blocking_notifier_call_chain(&module_notify_list,
    750				     MODULE_STATE_GOING, mod);
    751	klp_module_going(mod);
    752	ftrace_release_mod(mod);
    753
    754	async_synchronize_full();
    755
    756	/* Store the name of the last unloaded module for diagnostic purposes */
    757	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
    758
    759	free_module(mod);
    760	/* someone could wait for the module in add_unformed_module() */
    761	wake_up_all(&module_wq);
    762	return 0;
    763out:
    764	mutex_unlock(&module_mutex);
    765	return ret;
    766}
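
/*
 * Userspace reaches this through the raw syscall (sketch; error
 * handling elided).  kmod's rmmod does essentially:
 *
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(__NR_delete_module, "example_mod", O_NONBLOCK);
 *
 * Adding O_TRUNC to the flags requests a forced unload when
 * CONFIG_MODULE_FORCE_UNLOAD is enabled (see try_force_unload()).
 */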
    767
    768void __symbol_put(const char *symbol)
    769{
    770	struct find_symbol_arg fsa = {
    771		.name	= symbol,
    772		.gplok	= true,
    773	};
    774
    775	preempt_disable();
    776	BUG_ON(!find_symbol(&fsa));
    777	module_put(fsa.owner);
    778	preempt_enable();
    779}
    780EXPORT_SYMBOL(__symbol_put);
    781
    782/* Note this assumes addr is a function, which it currently always is. */
    783void symbol_put_addr(void *addr)
    784{
    785	struct module *modaddr;
    786	unsigned long a = (unsigned long)dereference_function_descriptor(addr);
    787
    788	if (core_kernel_text(a))
    789		return;
    790
    791	/*
    792	 * Even though we hold a reference on the module, we still need to
    793	 * disable preemption in order to safely traverse the data structure.
    794	 */
    795	preempt_disable();
    796	modaddr = __module_text_address(a);
    797	BUG_ON(!modaddr);
    798	module_put(modaddr);
    799	preempt_enable();
    800}
    801EXPORT_SYMBOL_GPL(symbol_put_addr);
    802
    803static ssize_t show_refcnt(struct module_attribute *mattr,
    804			   struct module_kobject *mk, char *buffer)
    805{
    806	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
    807}
    808
    809static struct module_attribute modinfo_refcnt =
    810	__ATTR(refcnt, 0444, show_refcnt, NULL);
    811
    812void __module_get(struct module *module)
    813{
    814	if (module) {
    815		preempt_disable();
    816		atomic_inc(&module->refcnt);
    817		trace_module_get(module, _RET_IP_);
    818		preempt_enable();
    819	}
    820}
    821EXPORT_SYMBOL(__module_get);
    822
    823bool try_module_get(struct module *module)
    824{
    825	bool ret = true;
    826
    827	if (module) {
    828		preempt_disable();
    829		/* Note: here, we can fail to get a reference */
    830		if (likely(module_is_live(module) &&
    831			   atomic_inc_not_zero(&module->refcnt) != 0))
    832			trace_module_get(module, _RET_IP_);
    833		else
    834			ret = false;
    835
    836		preempt_enable();
    837	}
    838	return ret;
    839}
    840EXPORT_SYMBOL(try_module_get);
    841
    842void module_put(struct module *module)
    843{
    844	int ret;
    845
    846	if (module) {
    847		preempt_disable();
    848		ret = atomic_dec_if_positive(&module->refcnt);
    849		WARN_ON(ret < 0);	/* Failed to put refcount */
    850		trace_module_put(module, _RET_IP_);
    851		preempt_enable();
    852	}
    853}
    854EXPORT_SYMBOL(module_put);
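
/*
 * The canonical pairing (sketch; op table hypothetical): pin the owner
 * before calling into it, drop the reference when done.
 *
 *	if (!try_module_get(ops->owner))
 *		return -ENODEV;
 *	ret = ops->do_something();
 *	module_put(ops->owner);
 */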
    855
    856#else /* !CONFIG_MODULE_UNLOAD */
    857static inline void module_unload_free(struct module *mod)
    858{
    859}
    860
    861static int ref_module(struct module *a, struct module *b)
    862{
    863	return strong_try_module_get(b);
    864}
    865
    866static inline int module_unload_init(struct module *mod)
    867{
    868	return 0;
    869}
    870#endif /* CONFIG_MODULE_UNLOAD */
    871
    872size_t module_flags_taint(unsigned long taints, char *buf)
    873{
    874	size_t l = 0;
    875	int i;
    876
    877	for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
    878		if (taint_flags[i].module && test_bit(i, &taints))
    879			buf[l++] = taint_flags[i].c_true;
    880	}
    881
    882	return l;
    883}
    884
    885static ssize_t show_initstate(struct module_attribute *mattr,
    886			      struct module_kobject *mk, char *buffer)
    887{
    888	const char *state = "unknown";
    889
    890	switch (mk->mod->state) {
    891	case MODULE_STATE_LIVE:
    892		state = "live";
    893		break;
    894	case MODULE_STATE_COMING:
    895		state = "coming";
    896		break;
    897	case MODULE_STATE_GOING:
    898		state = "going";
    899		break;
    900	default:
    901		BUG();
    902	}
    903	return sprintf(buffer, "%s\n", state);
    904}
    905
    906static struct module_attribute modinfo_initstate =
    907	__ATTR(initstate, 0444, show_initstate, NULL);
    908
    909static ssize_t store_uevent(struct module_attribute *mattr,
    910			    struct module_kobject *mk,
    911			    const char *buffer, size_t count)
    912{
    913	int rc;
    914
    915	rc = kobject_synth_uevent(&mk->kobj, buffer, count);
    916	return rc ? rc : count;
    917}
    918
    919struct module_attribute module_uevent =
    920	__ATTR(uevent, 0200, NULL, store_uevent);
    921
    922static ssize_t show_coresize(struct module_attribute *mattr,
    923			     struct module_kobject *mk, char *buffer)
    924{
    925	return sprintf(buffer, "%u\n", mk->mod->core_layout.size);
    926}
    927
    928static struct module_attribute modinfo_coresize =
    929	__ATTR(coresize, 0444, show_coresize, NULL);
    930
    931#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
    932static ssize_t show_datasize(struct module_attribute *mattr,
    933			     struct module_kobject *mk, char *buffer)
    934{
    935	return sprintf(buffer, "%u\n", mk->mod->data_layout.size);
    936}
    937
    938static struct module_attribute modinfo_datasize =
    939	__ATTR(datasize, 0444, show_datasize, NULL);
    940#endif
    941
    942static ssize_t show_initsize(struct module_attribute *mattr,
    943			     struct module_kobject *mk, char *buffer)
    944{
    945	return sprintf(buffer, "%u\n", mk->mod->init_layout.size);
    946}
    947
    948static struct module_attribute modinfo_initsize =
    949	__ATTR(initsize, 0444, show_initsize, NULL);
    950
    951static ssize_t show_taint(struct module_attribute *mattr,
    952			  struct module_kobject *mk, char *buffer)
    953{
    954	size_t l;
    955
    956	l = module_flags_taint(mk->mod->taints, buffer);
    957	buffer[l++] = '\n';
    958	return l;
    959}
    960
    961static struct module_attribute modinfo_taint =
    962	__ATTR(taint, 0444, show_taint, NULL);
    963
    964struct module_attribute *modinfo_attrs[] = {
    965	&module_uevent,
    966	&modinfo_version,
    967	&modinfo_srcversion,
    968	&modinfo_initstate,
    969	&modinfo_coresize,
    970#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
    971	&modinfo_datasize,
    972#endif
    973	&modinfo_initsize,
    974	&modinfo_taint,
    975#ifdef CONFIG_MODULE_UNLOAD
    976	&modinfo_refcnt,
    977#endif
    978	NULL,
    979};
    980
    981size_t modinfo_attrs_count = ARRAY_SIZE(modinfo_attrs);
    982
    983static const char vermagic[] = VERMAGIC_STRING;
    984
    985int try_to_force_load(struct module *mod, const char *reason)
    986{
    987#ifdef CONFIG_MODULE_FORCE_LOAD
    988	if (!test_taint(TAINT_FORCED_MODULE))
    989		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
    990	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
    991	return 0;
    992#else
    993	return -ENOEXEC;
    994#endif
    995}
    996
    997static char *get_modinfo(const struct load_info *info, const char *tag);
    998static char *get_next_modinfo(const struct load_info *info, const char *tag,
    999			      char *prev);
   1000
   1001static int verify_namespace_is_imported(const struct load_info *info,
   1002					const struct kernel_symbol *sym,
   1003					struct module *mod)
   1004{
   1005	const char *namespace;
   1006	char *imported_namespace;
   1007
   1008	namespace = kernel_symbol_namespace(sym);
   1009	if (namespace && namespace[0]) {
   1010		imported_namespace = get_modinfo(info, "import_ns");
   1011		while (imported_namespace) {
   1012			if (strcmp(namespace, imported_namespace) == 0)
   1013				return 0;
   1014			imported_namespace = get_next_modinfo(
   1015				info, "import_ns", imported_namespace);
   1016		}
   1017#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
   1018		pr_warn(
   1019#else
   1020		pr_err(
   1021#endif
   1022			"%s: module uses symbol (%s) from namespace %s, but does not import it.\n",
   1023			mod->name, kernel_symbol_name(sym), namespace);
   1024#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
   1025		return -EINVAL;
   1026#endif
   1027	}
   1028	return 0;
   1029}
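
/*
 * The "import_ns" tags scanned above come from MODULE_IMPORT_NS() in
 * the module source.  Illustrative pairing (namespace hypothetical):
 *
 *	EXPORT_SYMBOL_NS_GPL(my_sym, MY_NS);	// in the exporter
 *	MODULE_IMPORT_NS(MY_NS);		// in the consumer
 *
 * A consumer lacking the import fails here with -EINVAL, or only warns
 * when CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is set.
 */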
   1030
   1031static bool inherit_taint(struct module *mod, struct module *owner, const char *name)
   1032{
   1033	if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints))
   1034		return true;
   1035
   1036	if (mod->using_gplonly_symbols) {
   1037		pr_err("%s: module using GPL-only symbols uses symbol %s from proprietary module %s.\n",
   1038			mod->name, name, owner->name);
   1039		return false;
   1040	}
   1041
   1042	if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) {
   1043		pr_warn("%s: module uses symbol %s from proprietary module %s, inheriting taint.\n",
   1044			mod->name, name, owner->name);
   1045		set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints);
   1046	}
   1047	return true;
   1048}
   1049
   1050/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
   1051static const struct kernel_symbol *resolve_symbol(struct module *mod,
   1052						  const struct load_info *info,
   1053						  const char *name,
   1054						  char ownername[])
   1055{
   1056	struct find_symbol_arg fsa = {
   1057		.name	= name,
   1058		.gplok	= !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)),
   1059		.warn	= true,
   1060	};
   1061	int err;
   1062
   1063	/*
   1064	 * The module_mutex should not be a heavily contended lock;
   1065	 * if we get the occasional sleep here, we'll go an extra iteration
   1066	 * in the wait_event_interruptible(), which is harmless.
   1067	 */
   1068	sched_annotate_sleep();
   1069	mutex_lock(&module_mutex);
   1070	if (!find_symbol(&fsa))
   1071		goto unlock;
   1072
   1073	if (fsa.license == GPL_ONLY)
   1074		mod->using_gplonly_symbols = true;
   1075
   1076	if (!inherit_taint(mod, fsa.owner, name)) {
   1077		fsa.sym = NULL;
   1078		goto getname;
   1079	}
   1080
   1081	if (!check_version(info, name, mod, fsa.crc)) {
   1082		fsa.sym = ERR_PTR(-EINVAL);
   1083		goto getname;
   1084	}
   1085
   1086	err = verify_namespace_is_imported(info, fsa.sym, mod);
   1087	if (err) {
   1088		fsa.sym = ERR_PTR(err);
   1089		goto getname;
   1090	}
   1091
   1092	err = ref_module(mod, fsa.owner);
   1093	if (err) {
   1094		fsa.sym = ERR_PTR(err);
   1095		goto getname;
   1096	}
   1097
   1098getname:
   1099	/* We must copy the owner's name under the lock: without a ref it may vanish. */
   1100	strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN);
   1101unlock:
   1102	mutex_unlock(&module_mutex);
   1103	return fsa.sym;
   1104}
   1105
   1106static const struct kernel_symbol *
   1107resolve_symbol_wait(struct module *mod,
   1108		    const struct load_info *info,
   1109		    const char *name)
   1110{
   1111	const struct kernel_symbol *ksym;
   1112	char owner[MODULE_NAME_LEN];
   1113
   1114	if (wait_event_interruptible_timeout(module_wq,
   1115			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
   1116			|| PTR_ERR(ksym) != -EBUSY,
   1117					     30 * HZ) <= 0) {
   1118		pr_warn("%s: gave up waiting for init of module %s.\n",
   1119			mod->name, owner);
   1120	}
   1121	return ksym;
   1122}
   1123
   1124void __weak module_memfree(void *module_region)
   1125{
   1126	/*
   1127	 * This memory may be RO, and freeing RO memory in an interrupt is not
   1128	 * supported by vmalloc.
   1129	 */
   1130	WARN_ON(in_interrupt());
   1131	vfree(module_region);
   1132}
   1133
   1134void __weak module_arch_cleanup(struct module *mod)
   1135{
   1136}
   1137
   1138void __weak module_arch_freeing_init(struct module *mod)
   1139{
   1140}
   1141
   1142static void cfi_cleanup(struct module *mod);
   1143
   1144/* Free a module, remove from lists, etc. */
   1145static void free_module(struct module *mod)
   1146{
   1147	trace_module_free(mod);
   1148
   1149	mod_sysfs_teardown(mod);
   1150
   1151	/*
   1152	 * We leave it in the list to prevent duplicate loads, but make sure
   1153	 * that no one uses it while it's being deconstructed.
   1154	 */
   1155	mutex_lock(&module_mutex);
   1156	mod->state = MODULE_STATE_UNFORMED;
   1157	mutex_unlock(&module_mutex);
   1158
   1159	/* Remove dynamic debug info */
   1160	ddebug_remove_module(mod->name);
   1161
   1162	/* Arch-specific cleanup. */
   1163	module_arch_cleanup(mod);
   1164
   1165	/* Module unload stuff */
   1166	module_unload_free(mod);
   1167
   1168	/* Free any allocated parameters. */
   1169	destroy_params(mod->kp, mod->num_kp);
   1170
   1171	if (is_livepatch_module(mod))
   1172		free_module_elf(mod);
   1173
   1174	/* Now we can delete it from the lists */
   1175	mutex_lock(&module_mutex);
   1176	/* Unlink carefully: kallsyms could be walking list. */
   1177	list_del_rcu(&mod->list);
   1178	mod_tree_remove(mod);
   1179	/* Remove this module from bug list, this uses list_del_rcu */
   1180	module_bug_cleanup(mod);
   1181	/* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
   1182	synchronize_rcu();
   1183	if (try_add_tainted_module(mod))
   1184		pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n",
   1185		       mod->name);
   1186	mutex_unlock(&module_mutex);
   1187
   1188	/* Clean up CFI for the module. */
   1189	cfi_cleanup(mod);
   1190
   1191	/* This may be empty, but that's OK */
   1192	module_arch_freeing_init(mod);
   1193	module_memfree(mod->init_layout.base);
   1194	kfree(mod->args);
   1195	percpu_modfree(mod);
   1196
   1197	/* Free lock-classes; relies on the preceding sync_rcu(). */
   1198	lockdep_free_key_range(mod->data_layout.base, mod->data_layout.size);
   1199
   1200	/* Finally, free the core (containing the module structure) */
   1201	module_memfree(mod->core_layout.base);
   1202#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
   1203	vfree(mod->data_layout.base);
   1204#endif
   1205}
   1206
   1207void *__symbol_get(const char *symbol)
   1208{
   1209	struct find_symbol_arg fsa = {
   1210		.name	= symbol,
   1211		.gplok	= true,
   1212		.warn	= true,
   1213	};
   1214
   1215	preempt_disable();
   1216	if (!find_symbol(&fsa) || strong_try_module_get(fsa.owner)) {
   1217		preempt_enable();
   1218		return NULL;
   1219	}
   1220	preempt_enable();
   1221	return (void *)kernel_symbol_value(fsa.sym);
   1222}
   1223EXPORT_SYMBOL_GPL(__symbol_get);
   1224
   1225/*
   1226 * Ensure that an exported symbol [global namespace] does not already exist
   1227 * in the kernel or in some other module's exported symbol table.
   1228 *
   1229 * You must hold the module_mutex.
   1230 */
   1231static int verify_exported_symbols(struct module *mod)
   1232{
   1233	unsigned int i;
   1234	const struct kernel_symbol *s;
   1235	struct {
   1236		const struct kernel_symbol *sym;
   1237		unsigned int num;
   1238	} arr[] = {
   1239		{ mod->syms, mod->num_syms },
   1240		{ mod->gpl_syms, mod->num_gpl_syms },
   1241	};
   1242
   1243	for (i = 0; i < ARRAY_SIZE(arr); i++) {
   1244		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
   1245			struct find_symbol_arg fsa = {
   1246				.name	= kernel_symbol_name(s),
   1247				.gplok	= true,
   1248			};
   1249			if (find_symbol(&fsa)) {
   1250				pr_err("%s: exports duplicate symbol %s"
   1251				       " (owned by %s)\n",
   1252				       mod->name, kernel_symbol_name(s),
   1253				       module_name(fsa.owner));
   1254				return -ENOEXEC;
   1255			}
   1256		}
   1257	}
   1258	return 0;
   1259}
   1260
   1261static bool ignore_undef_symbol(Elf_Half emachine, const char *name)
   1262{
   1263	/*
   1264	 * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as
   1265	 * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64.
   1266	 * i386 has a similar problem but may not deserve a fix.
   1267	 *
   1268	 * If we ever have to ignore many symbols, consider refactoring the code to
   1269	 * only warn if referenced by a relocation.
   1270	 */
   1271	if (emachine == EM_386 || emachine == EM_X86_64)
   1272		return !strcmp(name, "_GLOBAL_OFFSET_TABLE_");
   1273	return false;
   1274}
   1275
   1276/* Change all symbols so that st_value encodes the pointer directly. */
   1277static int simplify_symbols(struct module *mod, const struct load_info *info)
   1278{
   1279	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
   1280	Elf_Sym *sym = (void *)symsec->sh_addr;
   1281	unsigned long secbase;
   1282	unsigned int i;
   1283	int ret = 0;
   1284	const struct kernel_symbol *ksym;
   1285
   1286	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
   1287		const char *name = info->strtab + sym[i].st_name;
   1288
   1289		switch (sym[i].st_shndx) {
   1290		case SHN_COMMON:
   1291			/* Ignore common symbols */
   1292			if (!strncmp(name, "__gnu_lto", 9))
   1293				break;
   1294
   1295			/*
   1296			 * We compiled with -fno-common.  These are not
   1297			 * supposed to happen.
   1298			 */
   1299			pr_debug("Common symbol: %s\n", name);
   1300			pr_warn("%s: please compile with -fno-common\n",
   1301			       mod->name);
   1302			ret = -ENOEXEC;
   1303			break;
   1304
   1305		case SHN_ABS:
   1306			/* Don't need to do anything */
   1307			pr_debug("Absolute symbol: 0x%08lx\n",
   1308			       (long)sym[i].st_value);
   1309			break;
   1310
   1311		case SHN_LIVEPATCH:
   1312			/* Livepatch symbols are resolved by livepatch */
   1313			break;
   1314
   1315		case SHN_UNDEF:
   1316			ksym = resolve_symbol_wait(mod, info, name);
   1317			/* Ok if resolved.  */
   1318			if (ksym && !IS_ERR(ksym)) {
   1319				sym[i].st_value = kernel_symbol_value(ksym);
   1320				break;
   1321			}
   1322
   1323			/* Ok if weak or ignored.  */
   1324			if (!ksym &&
   1325			    (ELF_ST_BIND(sym[i].st_info) == STB_WEAK ||
   1326			     ignore_undef_symbol(info->hdr->e_machine, name)))
   1327				break;
   1328
   1329			ret = PTR_ERR(ksym) ?: -ENOENT;
   1330			pr_warn("%s: Unknown symbol %s (err %d)\n",
   1331				mod->name, name, ret);
   1332			break;
   1333
   1334		default:
   1335			/* Divert to percpu allocation if a percpu var. */
   1336			if (sym[i].st_shndx == info->index.pcpu)
   1337				secbase = (unsigned long)mod_percpu(mod);
   1338			else
   1339				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
   1340			sym[i].st_value += secbase;
   1341			break;
   1342		}
   1343	}
   1344
   1345	return ret;
   1346}
   1347
   1348static int apply_relocations(struct module *mod, const struct load_info *info)
   1349{
   1350	unsigned int i;
   1351	int err = 0;
   1352
   1353	/* Now do relocations. */
   1354	for (i = 1; i < info->hdr->e_shnum; i++) {
   1355		unsigned int infosec = info->sechdrs[i].sh_info;
   1356
   1357		/* Not a valid relocation section? */
   1358		if (infosec >= info->hdr->e_shnum)
   1359			continue;
   1360
   1361		/* Don't bother with non-allocated sections */
   1362		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
   1363			continue;
   1364
   1365		if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
   1366			err = klp_apply_section_relocs(mod, info->sechdrs,
   1367						       info->secstrings,
   1368						       info->strtab,
   1369						       info->index.sym, i,
   1370						       NULL);
   1371		else if (info->sechdrs[i].sh_type == SHT_REL)
   1372			err = apply_relocate(info->sechdrs, info->strtab,
   1373					     info->index.sym, i, mod);
   1374		else if (info->sechdrs[i].sh_type == SHT_RELA)
   1375			err = apply_relocate_add(info->sechdrs, info->strtab,
   1376						 info->index.sym, i, mod);
   1377		if (err < 0)
   1378			break;
   1379	}
   1380	return err;
   1381}
   1382
   1383/* Additional bytes needed by arch in front of individual sections */
   1384unsigned int __weak arch_mod_section_prepend(struct module *mod,
   1385					     unsigned int section)
   1386{
   1387	/* default implementation just returns zero */
   1388	return 0;
   1389}
   1390
   1391/* Update size with this section: return offset. */
   1392long module_get_offset(struct module *mod, unsigned int *size,
   1393		       Elf_Shdr *sechdr, unsigned int section)
   1394{
   1395	long ret;
   1396
   1397	*size += arch_mod_section_prepend(mod, section);
   1398	ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
   1399	*size = ret + sechdr->sh_size;
   1400	return ret;
   1401}
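
/*
 * Worked example (assuming arch_mod_section_prepend() returns 0): with
 * *size == 100 and a section where sh_addralign == 16 and sh_size == 40,
 * the returned offset is ALIGN(100, 16) == 112 and *size becomes 152
 * for the next section.
 */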
   1402
   1403static bool module_init_layout_section(const char *sname)
   1404{
   1405#ifndef CONFIG_MODULE_UNLOAD
   1406	if (module_exit_section(sname))
   1407		return true;
   1408#endif
   1409	return module_init_section(sname);
   1410}
   1411
   1412/*
   1413 * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
   1414 * might -- code, read-only data, read-write data, small data.  Tally
   1415 * sizes, and place the offsets into sh_entsize fields: high bit means it
   1416 * belongs in init.
   1417 */
   1418static void layout_sections(struct module *mod, struct load_info *info)
   1419{
   1420	static unsigned long const masks[][2] = {
   1421		/*
   1422		 * NOTE: all executable code must be the first section
   1423		 * in this array; otherwise modify the text_size
   1424		 * finder in the two loops below
   1425		 */
   1426		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
   1427		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
   1428		{ SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
   1429		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
   1430		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
   1431	};
   1432	unsigned int m, i;
   1433
   1434	for (i = 0; i < info->hdr->e_shnum; i++)
   1435		info->sechdrs[i].sh_entsize = ~0UL;
   1436
   1437	pr_debug("Core section allocation order:\n");
   1438	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
   1439		for (i = 0; i < info->hdr->e_shnum; ++i) {
   1440			Elf_Shdr *s = &info->sechdrs[i];
   1441			const char *sname = info->secstrings + s->sh_name;
   1442			unsigned int *sizep;
   1443
   1444			if ((s->sh_flags & masks[m][0]) != masks[m][0]
   1445			    || (s->sh_flags & masks[m][1])
   1446			    || s->sh_entsize != ~0UL
   1447			    || module_init_layout_section(sname))
   1448				continue;
   1449			sizep = m ? &mod->data_layout.size : &mod->core_layout.size;
   1450			s->sh_entsize = module_get_offset(mod, sizep, s, i);
   1451			pr_debug("\t%s\n", sname);
   1452		}
   1453		switch (m) {
   1454		case 0: /* executable */
   1455			mod->core_layout.size = strict_align(mod->core_layout.size);
   1456			mod->core_layout.text_size = mod->core_layout.size;
   1457			break;
   1458		case 1: /* RO: text and ro-data */
   1459			mod->data_layout.size = strict_align(mod->data_layout.size);
   1460			mod->data_layout.ro_size = mod->data_layout.size;
   1461			break;
   1462		case 2: /* RO after init */
   1463			mod->data_layout.size = strict_align(mod->data_layout.size);
   1464			mod->data_layout.ro_after_init_size = mod->data_layout.size;
   1465			break;
   1466		case 4: /* whole core */
   1467			mod->data_layout.size = strict_align(mod->data_layout.size);
   1468			break;
   1469		}
   1470	}
   1471
   1472	pr_debug("Init section allocation order:\n");
   1473	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
   1474		for (i = 0; i < info->hdr->e_shnum; ++i) {
   1475			Elf_Shdr *s = &info->sechdrs[i];
   1476			const char *sname = info->secstrings + s->sh_name;
   1477
   1478			if ((s->sh_flags & masks[m][0]) != masks[m][0]
   1479			    || (s->sh_flags & masks[m][1])
   1480			    || s->sh_entsize != ~0UL
   1481			    || !module_init_layout_section(sname))
   1482				continue;
   1483			s->sh_entsize = (module_get_offset(mod, &mod->init_layout.size, s, i)
   1484					 | INIT_OFFSET_MASK);
   1485			pr_debug("\t%s\n", sname);
   1486		}
   1487		switch (m) {
   1488		case 0: /* executable */
   1489			mod->init_layout.size = strict_align(mod->init_layout.size);
   1490			mod->init_layout.text_size = mod->init_layout.size;
   1491			break;
   1492		case 1: /* RO: text and ro-data */
   1493			mod->init_layout.size = strict_align(mod->init_layout.size);
   1494			mod->init_layout.ro_size = mod->init_layout.size;
   1495			break;
   1496		case 2:
   1497			/*
   1498			 * RO after init doesn't apply to init_layout (only
   1499			 * core_layout), so it just takes the value of ro_size.
   1500			 */
   1501			mod->init_layout.ro_after_init_size = mod->init_layout.ro_size;
   1502			break;
   1503		case 4: /* whole init */
   1504			mod->init_layout.size = strict_align(mod->init_layout.size);
   1505			break;
   1506		}
   1507	}
   1508}
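
/*
 * Net effect, sketched: core_layout ends up with executable text first
 * (core_layout.text_size), while data_layout accumulates read-only data
 * (ro_size), ro-after-init data (ro_after_init_size) and finally
 * writable data; each boundary is strict_align()ed so that, with
 * CONFIG_STRICT_MODULE_RWX, every region can carry its own page
 * permissions.
 */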
   1509
   1510static void set_license(struct module *mod, const char *license)
   1511{
   1512	if (!license)
   1513		license = "unspecified";
   1514
   1515	if (!license_is_gpl_compatible(license)) {
   1516		if (!test_taint(TAINT_PROPRIETARY_MODULE))
   1517			pr_warn("%s: module license '%s' taints kernel.\n",
   1518				mod->name, license);
   1519		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
   1520				 LOCKDEP_NOW_UNRELIABLE);
   1521	}
   1522}
   1523
   1524/* Parse tag=value strings from .modinfo section */
   1525static char *next_string(char *string, unsigned long *secsize)
   1526{
   1527	/* Skip non-zero chars */
   1528	while (string[0]) {
   1529		string++;
   1530		if ((*secsize)-- <= 1)
   1531			return NULL;
   1532	}
   1533
   1534	/* Skip any zero padding. */
   1535	while (!string[0]) {
   1536		string++;
   1537		if ((*secsize)-- <= 1)
   1538			return NULL;
   1539	}
   1540	return string;
   1541}
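
/*
 * The .modinfo section being walked here is a run of NUL-terminated
 * "tag=value" strings with possible NUL padding in between, e.g.
 * (values illustrative):
 *
 *	"license=GPL\0author=Jane Doe\0\0depends=\0vermagic=5.19.0 ..."
 *
 * next_string() skips the current entry plus any padding, so the
 * scanners below see one tag=value pair per call.
 */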
   1542
   1543static char *get_next_modinfo(const struct load_info *info, const char *tag,
   1544			      char *prev)
   1545{
   1546	char *p;
   1547	unsigned int taglen = strlen(tag);
   1548	Elf_Shdr *infosec = &info->sechdrs[info->index.info];
   1549	unsigned long size = infosec->sh_size;
   1550
   1551	/*
   1552	 * get_modinfo() calls made before rewrite_section_headers()
   1553	 * must use sh_offset, as sh_addr isn't set!
   1554	 */
   1555	char *modinfo = (char *)info->hdr + infosec->sh_offset;
   1556
   1557	if (prev) {
   1558		size -= prev - modinfo;
   1559		modinfo = next_string(prev, &size);
   1560	}
   1561
   1562	for (p = modinfo; p; p = next_string(p, &size)) {
   1563		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
   1564			return p + taglen + 1;
   1565	}
   1566	return NULL;
   1567}
   1568
   1569static char *get_modinfo(const struct load_info *info, const char *tag)
   1570{
   1571	return get_next_modinfo(info, tag, NULL);
   1572}
   1573
   1574static void setup_modinfo(struct module *mod, struct load_info *info)
   1575{
   1576	struct module_attribute *attr;
   1577	int i;
   1578
   1579	for (i = 0; (attr = modinfo_attrs[i]); i++) {
   1580		if (attr->setup)
   1581			attr->setup(mod, get_modinfo(info, attr->attr.name));
   1582	}
   1583}
   1584
   1585static void free_modinfo(struct module *mod)
   1586{
   1587	struct module_attribute *attr;
   1588	int i;
   1589
   1590	for (i = 0; (attr = modinfo_attrs[i]); i++) {
   1591		if (attr->free)
   1592			attr->free(mod);
   1593	}
   1594}
   1595
   1596static void dynamic_debug_setup(struct module *mod, struct _ddebug *debug, unsigned int num)
   1597{
   1598	if (!debug)
   1599		return;
   1600	ddebug_add_module(debug, num, mod->name);
   1601}
   1602
   1603static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug)
   1604{
   1605	if (debug)
   1606		ddebug_remove_module(mod->name);
   1607}
   1608
   1609void * __weak module_alloc(unsigned long size)
   1610{
   1611	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
   1612			GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
   1613			NUMA_NO_NODE, __builtin_return_address(0));
   1614}
   1615
   1616bool __weak module_init_section(const char *name)
   1617{
   1618	return strstarts(name, ".init");
   1619}
   1620
   1621bool __weak module_exit_section(const char *name)
   1622{
   1623	return strstarts(name, ".exit");
   1624}
   1625
   1626static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
   1627{
   1628#if defined(CONFIG_64BIT)
   1629	unsigned long long secend;
   1630#else
   1631	unsigned long secend;
   1632#endif
   1633
   1634	/*
   1635	 * Check for both overflow and offset/size being
   1636	 * too large.
   1637	 */
   1638	secend = shdr->sh_offset + shdr->sh_size;
   1639	if (secend < shdr->sh_offset || secend > info->len)
   1640		return -ENOEXEC;
   1641
   1642	return 0;
   1643}
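
/*
 * Concrete case the overflow check catches (32-bit sketch): a crafted
 * header with sh_offset == 0xfffffff0 and sh_size == 0x20 wraps secend
 * around to 0x10 < sh_offset, so the section cannot slip past the
 * info->len comparison.
 */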
   1644
   1645/*
   1646 * Sanity checks against invalid binaries, wrong arch, weird elf version.
   1647 *
   1648 * Also do basic validity checks against section offsets and sizes, the
   1649 * section name string table, and the indices used for it (sh_name).
   1650 */
   1651static int elf_validity_check(struct load_info *info)
   1652{
   1653	unsigned int i;
   1654	Elf_Shdr *shdr, *strhdr;
   1655	int err;
   1656
   1657	if (info->len < sizeof(*(info->hdr))) {
   1658		pr_err("Invalid ELF header len %lu\n", info->len);
   1659		goto no_exec;
   1660	}
   1661
   1662	if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) {
   1663		pr_err("Invalid ELF header magic: != %s\n", ELFMAG);
   1664		goto no_exec;
   1665	}
   1666	if (info->hdr->e_type != ET_REL) {
   1667		pr_err("Invalid ELF header type: %u != %u\n",
   1668		       info->hdr->e_type, ET_REL);
   1669		goto no_exec;
   1670	}
   1671	if (!elf_check_arch(info->hdr)) {
   1672		pr_err("Invalid architecture in ELF header: %u\n",
   1673		       info->hdr->e_machine);
   1674		goto no_exec;
   1675	}
   1676	if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) {
   1677		pr_err("Invalid ELF section header size\n");
   1678		goto no_exec;
   1679	}
   1680
   1681	/*
   1682	 * e_shnum is 16 bits, and sizeof(Elf_Shdr) is
   1683	 * known and small. So e_shnum * sizeof(Elf_Shdr)
   1684	 * will not overflow unsigned long on any platform.
   1685	 */
   1686	if (info->hdr->e_shoff >= info->len
   1687	    || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
   1688		info->len - info->hdr->e_shoff)) {
   1689		pr_err("Invalid ELF section header overflow\n");
   1690		goto no_exec;
   1691	}
   1692
   1693	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
   1694
   1695	/*
   1696	 * Verify that the section name table index is valid.
   1697	 */
   1698	if (info->hdr->e_shstrndx == SHN_UNDEF
   1699	    || info->hdr->e_shstrndx >= info->hdr->e_shnum) {
   1700		pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n",
   1701		       info->hdr->e_shstrndx, info->hdr->e_shstrndx,
   1702		       info->hdr->e_shnum);
   1703		goto no_exec;
   1704	}
   1705
   1706	strhdr = &info->sechdrs[info->hdr->e_shstrndx];
   1707	err = validate_section_offset(info, strhdr);
   1708	if (err < 0) {
   1709		pr_err("Invalid ELF section hdr(type %u)\n", strhdr->sh_type);
   1710		return err;
   1711	}
   1712
   1713	/*
   1714	 * The section name table must be NUL-terminated, as required
   1715	 * by the spec. This makes strcmp and pr_* calls that access
   1716	 * strings in the section safe.
   1717	 */
   1718	info->secstrings = (void *)info->hdr + strhdr->sh_offset;
   1719	if (strhdr->sh_size == 0) {
   1720		pr_err("empty section name table\n");
   1721		goto no_exec;
   1722	}
   1723	if (info->secstrings[strhdr->sh_size - 1] != '\0') {
   1724		pr_err("ELF Spec violation: section name table isn't null terminated\n");
   1725		goto no_exec;
   1726	}
   1727
   1728	/*
   1729	 * The code assumes that section 0 has a length of zero and
   1730	 * an addr of zero, so check for it.
   1731	 */
   1732	if (info->sechdrs[0].sh_type != SHT_NULL
   1733	    || info->sechdrs[0].sh_size != 0
   1734	    || info->sechdrs[0].sh_addr != 0) {
    1735		pr_err("ELF Spec violation: section 0 type(%d)!=SHT_NULL or non-zero len or addr\n",
   1736		       info->sechdrs[0].sh_type);
   1737		goto no_exec;
   1738	}
   1739
   1740	for (i = 1; i < info->hdr->e_shnum; i++) {
   1741		shdr = &info->sechdrs[i];
   1742		switch (shdr->sh_type) {
   1743		case SHT_NULL:
   1744		case SHT_NOBITS:
   1745			continue;
   1746		case SHT_SYMTAB:
   1747			if (shdr->sh_link == SHN_UNDEF
   1748			    || shdr->sh_link >= info->hdr->e_shnum) {
    1749				pr_err("Invalid ELF sh_link: SHT_SYMTAB sh_link (%d) is SHN_UNDEF or >= e_shnum (%d)\n",
    1750				       shdr->sh_link,
    1751				       info->hdr->e_shnum);
   1752				goto no_exec;
   1753			}
   1754			fallthrough;
   1755		default:
   1756			err = validate_section_offset(info, shdr);
   1757			if (err < 0) {
   1758				pr_err("Invalid ELF section in module (section %u type %u)\n",
   1759					i, shdr->sh_type);
   1760				return err;
   1761			}
   1762
   1763			if (shdr->sh_flags & SHF_ALLOC) {
   1764				if (shdr->sh_name >= strhdr->sh_size) {
   1765					pr_err("Invalid ELF section name in module (section %u type %u)\n",
   1766					       i, shdr->sh_type);
   1767					return -ENOEXEC;
   1768				}
   1769			}
   1770			break;
   1771		}
   1772	}
   1773
   1774	return 0;
   1775
   1776no_exec:
   1777	return -ENOEXEC;
   1778}
   1779
   1780#define COPY_CHUNK_SIZE (16*PAGE_SIZE)
   1781
   1782static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
   1783{
   1784	do {
   1785		unsigned long n = min(len, COPY_CHUNK_SIZE);
   1786
   1787		if (copy_from_user(dst, usrc, n) != 0)
   1788			return -EFAULT;
   1789		cond_resched();
   1790		dst += n;
   1791		usrc += n;
   1792		len -= n;
   1793	} while (len);
   1794	return 0;
   1795}
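        /*
         * Sizing note (illustrative, assuming 4 KiB pages): COPY_CHUNK_SIZE
         * is 16 * 4096 = 64 KiB, so e.g. a 10 MiB module image is copied in
         * 160 chunks with a cond_resched() between each, bounding the time
         * spent in any single uninterrupted copy_from_user() call.
         */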
   1796
   1797static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
   1798{
   1799	if (!get_modinfo(info, "livepatch"))
   1800		/* Nothing more to do */
   1801		return 0;
   1802
   1803	if (set_livepatch_module(mod)) {
   1804		add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
   1805		pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n",
   1806				mod->name);
   1807		return 0;
   1808	}
   1809
    1810	pr_err("%s: module is marked as livepatch module, but livepatch support is disabled\n",
   1811	       mod->name);
   1812	return -ENOEXEC;
   1813}
   1814
   1815static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
   1816{
   1817	if (retpoline_module_ok(get_modinfo(info, "retpoline")))
   1818		return;
   1819
   1820	pr_warn("%s: loading module not compiled with retpoline compiler.\n",
   1821		mod->name);
   1822}
   1823
   1824/* Sets info->hdr and info->len. */
   1825static int copy_module_from_user(const void __user *umod, unsigned long len,
   1826				  struct load_info *info)
   1827{
   1828	int err;
   1829
   1830	info->len = len;
   1831	if (info->len < sizeof(*(info->hdr)))
   1832		return -ENOEXEC;
   1833
   1834	err = security_kernel_load_data(LOADING_MODULE, true);
   1835	if (err)
   1836		return err;
   1837
   1838	/* Suck in entire file: we'll want most of it. */
   1839	info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN);
   1840	if (!info->hdr)
   1841		return -ENOMEM;
   1842
   1843	if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
   1844		err = -EFAULT;
   1845		goto out;
   1846	}
   1847
   1848	err = security_kernel_post_load_data((char *)info->hdr, info->len,
   1849					     LOADING_MODULE, "init_module");
   1850out:
   1851	if (err)
   1852		vfree(info->hdr);
   1853
   1854	return err;
   1855}
   1856
   1857static void free_copy(struct load_info *info, int flags)
   1858{
   1859	if (flags & MODULE_INIT_COMPRESSED_FILE)
   1860		module_decompress_cleanup(info);
   1861	else
   1862		vfree(info->hdr);
   1863}
   1864
   1865static int rewrite_section_headers(struct load_info *info, int flags)
   1866{
   1867	unsigned int i;
   1868
   1869	/* This should always be true, but let's be sure. */
   1870	info->sechdrs[0].sh_addr = 0;
   1871
   1872	for (i = 1; i < info->hdr->e_shnum; i++) {
   1873		Elf_Shdr *shdr = &info->sechdrs[i];
   1874
   1875		/*
    1876		 * Point each section's sh_addr at its location in the
    1877		 * temporary image.
   1878		 */
   1879		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
   1880
   1881	}
   1882
   1883	/* Track but don't keep modinfo and version sections. */
   1884	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
   1885	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
   1886
   1887	return 0;
   1888}
   1889
   1890/*
   1891 * Set up our basic convenience variables (pointers to section headers,
   1892 * search for module section index etc), and do some basic section
   1893 * verification.
   1894 *
   1895 * Set info->mod to the temporary copy of the module in info->hdr. The final one
   1896 * will be allocated in move_module().
   1897 */
   1898static int setup_load_info(struct load_info *info, int flags)
   1899{
   1900	unsigned int i;
   1901
   1902	/* Try to find a name early so we can log errors with a module name */
   1903	info->index.info = find_sec(info, ".modinfo");
   1904	if (info->index.info)
   1905		info->name = get_modinfo(info, "name");
   1906
   1907	/* Find internal symbols and strings. */
   1908	for (i = 1; i < info->hdr->e_shnum; i++) {
   1909		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
   1910			info->index.sym = i;
   1911			info->index.str = info->sechdrs[i].sh_link;
   1912			info->strtab = (char *)info->hdr
   1913				+ info->sechdrs[info->index.str].sh_offset;
   1914			break;
   1915		}
   1916	}
   1917
   1918	if (info->index.sym == 0) {
   1919		pr_warn("%s: module has no symbols (stripped?)\n",
   1920			info->name ?: "(missing .modinfo section or name field)");
   1921		return -ENOEXEC;
   1922	}
   1923
   1924	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
   1925	if (!info->index.mod) {
   1926		pr_warn("%s: No module found in object\n",
   1927			info->name ?: "(missing .modinfo section or name field)");
   1928		return -ENOEXEC;
   1929	}
   1930	/* This is temporary: point mod into copy of data. */
   1931	info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset;
   1932
   1933	/*
   1934	 * If we didn't load the .modinfo 'name' field earlier, fall back to
   1935	 * on-disk struct mod 'name' field.
   1936	 */
   1937	if (!info->name)
   1938		info->name = info->mod->name;
   1939
   1940	if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
   1941		info->index.vers = 0; /* Pretend no __versions section! */
   1942	else
   1943		info->index.vers = find_sec(info, "__versions");
   1944
   1945	info->index.pcpu = find_pcpusec(info);
   1946
   1947	return 0;
   1948}
   1949
   1950static int check_modinfo(struct module *mod, struct load_info *info, int flags)
   1951{
   1952	const char *modmagic = get_modinfo(info, "vermagic");
   1953	int err;
   1954
   1955	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
   1956		modmagic = NULL;
   1957
   1958	/* This is allowed: modprobe --force will invalidate it. */
   1959	if (!modmagic) {
   1960		err = try_to_force_load(mod, "bad vermagic");
   1961		if (err)
   1962			return err;
   1963	} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
   1964		pr_err("%s: version magic '%s' should be '%s'\n",
   1965		       info->name, modmagic, vermagic);
   1966		return -ENOEXEC;
   1967	}
   1968
   1969	if (!get_modinfo(info, "intree")) {
   1970		if (!test_taint(TAINT_OOT_MODULE))
   1971			pr_warn("%s: loading out-of-tree module taints kernel.\n",
   1972				mod->name);
   1973		add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
   1974	}
   1975
   1976	check_modinfo_retpoline(mod, info);
   1977
   1978	if (get_modinfo(info, "staging")) {
   1979		add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
   1980		pr_warn("%s: module is from the staging directory, the quality "
   1981			"is unknown, you have been warned.\n", mod->name);
   1982	}
   1983
   1984	err = check_modinfo_livepatch(mod, info);
   1985	if (err)
   1986		return err;
   1987
   1988	/* Set up license info based on the info section */
   1989	set_license(mod, get_modinfo(info, "license"));
   1990
   1991	return 0;
   1992}
   1993
   1994static int find_module_sections(struct module *mod, struct load_info *info)
   1995{
   1996	mod->kp = section_objs(info, "__param",
   1997			       sizeof(*mod->kp), &mod->num_kp);
   1998	mod->syms = section_objs(info, "__ksymtab",
   1999				 sizeof(*mod->syms), &mod->num_syms);
   2000	mod->crcs = section_addr(info, "__kcrctab");
   2001	mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
   2002				     sizeof(*mod->gpl_syms),
   2003				     &mod->num_gpl_syms);
   2004	mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
   2005
   2006#ifdef CONFIG_CONSTRUCTORS
   2007	mod->ctors = section_objs(info, ".ctors",
   2008				  sizeof(*mod->ctors), &mod->num_ctors);
   2009	if (!mod->ctors)
   2010		mod->ctors = section_objs(info, ".init_array",
   2011				sizeof(*mod->ctors), &mod->num_ctors);
   2012	else if (find_sec(info, ".init_array")) {
   2013		/*
    2014		 * This shouldn't happen when the same compiler and binutils
    2015		 * build all parts of the module.
   2016		 */
   2017		pr_warn("%s: has both .ctors and .init_array.\n",
   2018		       mod->name);
   2019		return -EINVAL;
   2020	}
   2021#endif
   2022
   2023	mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1,
   2024						&mod->noinstr_text_size);
   2025
   2026#ifdef CONFIG_TRACEPOINTS
   2027	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
   2028					     sizeof(*mod->tracepoints_ptrs),
   2029					     &mod->num_tracepoints);
   2030#endif
   2031#ifdef CONFIG_TREE_SRCU
   2032	mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs",
   2033					     sizeof(*mod->srcu_struct_ptrs),
   2034					     &mod->num_srcu_structs);
   2035#endif
   2036#ifdef CONFIG_BPF_EVENTS
   2037	mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map",
   2038					   sizeof(*mod->bpf_raw_events),
   2039					   &mod->num_bpf_raw_events);
   2040#endif
   2041#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
   2042	mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size);
   2043#endif
   2044#ifdef CONFIG_JUMP_LABEL
   2045	mod->jump_entries = section_objs(info, "__jump_table",
   2046					sizeof(*mod->jump_entries),
   2047					&mod->num_jump_entries);
   2048#endif
   2049#ifdef CONFIG_EVENT_TRACING
   2050	mod->trace_events = section_objs(info, "_ftrace_events",
   2051					 sizeof(*mod->trace_events),
   2052					 &mod->num_trace_events);
   2053	mod->trace_evals = section_objs(info, "_ftrace_eval_map",
   2054					sizeof(*mod->trace_evals),
   2055					&mod->num_trace_evals);
   2056#endif
   2057#ifdef CONFIG_TRACING
   2058	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
   2059					 sizeof(*mod->trace_bprintk_fmt_start),
   2060					 &mod->num_trace_bprintk_fmt);
   2061#endif
   2062#ifdef CONFIG_FTRACE_MCOUNT_RECORD
   2063	/* sechdrs[0].sh_size is always zero */
   2064	mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION,
   2065					     sizeof(*mod->ftrace_callsites),
   2066					     &mod->num_ftrace_callsites);
   2067#endif
   2068#ifdef CONFIG_FUNCTION_ERROR_INJECTION
   2069	mod->ei_funcs = section_objs(info, "_error_injection_whitelist",
   2070					    sizeof(*mod->ei_funcs),
   2071					    &mod->num_ei_funcs);
   2072#endif
   2073#ifdef CONFIG_KPROBES
   2074	mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1,
   2075						&mod->kprobes_text_size);
   2076	mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist",
   2077						sizeof(unsigned long),
   2078						&mod->num_kprobe_blacklist);
   2079#endif
   2080#ifdef CONFIG_PRINTK_INDEX
   2081	mod->printk_index_start = section_objs(info, ".printk_index",
   2082					       sizeof(*mod->printk_index_start),
   2083					       &mod->printk_index_size);
   2084#endif
   2085#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
   2086	mod->static_call_sites = section_objs(info, ".static_call_sites",
   2087					      sizeof(*mod->static_call_sites),
   2088					      &mod->num_static_call_sites);
   2089#endif
   2090	mod->extable = section_objs(info, "__ex_table",
   2091				    sizeof(*mod->extable), &mod->num_exentries);
   2092
   2093	if (section_addr(info, "__obsparm"))
   2094		pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
   2095
   2096	info->debug = section_objs(info, "__dyndbg",
   2097				   sizeof(*info->debug), &info->num_debug);
   2098
   2099	return 0;
   2100}
   2101
   2102static int move_module(struct module *mod, struct load_info *info)
   2103{
   2104	int i;
   2105	void *ptr;
   2106
   2107	/* Do the allocs. */
   2108	ptr = module_alloc(mod->core_layout.size);
   2109	/*
   2110	 * The pointer to this block is stored in the module structure
   2111	 * which is inside the block. Just mark it as not being a
   2112	 * leak.
   2113	 */
   2114	kmemleak_not_leak(ptr);
   2115	if (!ptr)
   2116		return -ENOMEM;
   2117
   2118	memset(ptr, 0, mod->core_layout.size);
   2119	mod->core_layout.base = ptr;
   2120
   2121	if (mod->init_layout.size) {
   2122		ptr = module_alloc(mod->init_layout.size);
   2123		/*
   2124		 * The pointer to this block is stored in the module structure
   2125		 * which is inside the block. This block doesn't need to be
   2126		 * scanned as it contains data and code that will be freed
   2127		 * after the module is initialized.
   2128		 */
   2129		kmemleak_ignore(ptr);
   2130		if (!ptr) {
   2131			module_memfree(mod->core_layout.base);
   2132			return -ENOMEM;
   2133		}
   2134		memset(ptr, 0, mod->init_layout.size);
   2135		mod->init_layout.base = ptr;
   2136	} else
   2137		mod->init_layout.base = NULL;
   2138
   2139#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
   2140	/* Do the allocs. */
   2141	ptr = vmalloc(mod->data_layout.size);
   2142	/*
   2143	 * The pointer to this block is stored in the module structure
   2144	 * which is inside the block. Just mark it as not being a
   2145	 * leak.
   2146	 */
   2147	kmemleak_not_leak(ptr);
   2148	if (!ptr) {
   2149		module_memfree(mod->core_layout.base);
   2150		module_memfree(mod->init_layout.base);
   2151		return -ENOMEM;
   2152	}
   2153
   2154	memset(ptr, 0, mod->data_layout.size);
   2155	mod->data_layout.base = ptr;
   2156#endif
   2157	/* Transfer each section which specifies SHF_ALLOC */
   2158	pr_debug("final section addresses:\n");
   2159	for (i = 0; i < info->hdr->e_shnum; i++) {
   2160		void *dest;
   2161		Elf_Shdr *shdr = &info->sechdrs[i];
   2162
   2163		if (!(shdr->sh_flags & SHF_ALLOC))
   2164			continue;
   2165
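        		/*
        		 * layout_sections() stored each section's destination
        		 * offset in sh_entsize, with INIT_OFFSET_MASK flagging
        		 * sections destined for the init region: init sections go
        		 * to init_layout, executable ones to core_layout, and the
        		 * remaining (non-executable) ones to data_layout.
        		 */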
   2166		if (shdr->sh_entsize & INIT_OFFSET_MASK)
   2167			dest = mod->init_layout.base
   2168				+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
   2169		else if (!(shdr->sh_flags & SHF_EXECINSTR))
   2170			dest = mod->data_layout.base + shdr->sh_entsize;
   2171		else
   2172			dest = mod->core_layout.base + shdr->sh_entsize;
   2173
   2174		if (shdr->sh_type != SHT_NOBITS)
   2175			memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
   2176		/* Update sh_addr to point to copy in image. */
   2177		shdr->sh_addr = (unsigned long)dest;
   2178		pr_debug("\t0x%lx %s\n",
   2179			 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
   2180	}
   2181
   2182	return 0;
   2183}
   2184
   2185static int check_module_license_and_versions(struct module *mod)
   2186{
   2187	int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
   2188
   2189	/*
   2190	 * ndiswrapper is under GPL by itself, but loads proprietary modules.
   2191	 * Don't use add_taint_module(), as it would prevent ndiswrapper from
   2192	 * using GPL-only symbols it needs.
   2193	 */
   2194	if (strcmp(mod->name, "ndiswrapper") == 0)
   2195		add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
   2196
   2197	/* driverloader was caught wrongly pretending to be under GPL */
   2198	if (strcmp(mod->name, "driverloader") == 0)
   2199		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
   2200				 LOCKDEP_NOW_UNRELIABLE);
   2201
   2202	/* lve claims to be GPL but upstream won't provide source */
   2203	if (strcmp(mod->name, "lve") == 0)
   2204		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
   2205				 LOCKDEP_NOW_UNRELIABLE);
   2206
   2207	if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
   2208		pr_warn("%s: module license taints kernel.\n", mod->name);
   2209
   2210#ifdef CONFIG_MODVERSIONS
   2211	if ((mod->num_syms && !mod->crcs) ||
   2212	    (mod->num_gpl_syms && !mod->gpl_crcs)) {
   2213		return try_to_force_load(mod,
   2214					 "no versions for exported symbols");
   2215	}
   2216#endif
   2217	return 0;
   2218}
   2219
   2220static void flush_module_icache(const struct module *mod)
   2221{
   2222	/*
   2223	 * Flush the instruction cache, since we've played with text.
   2224	 * Do it before processing of module parameters, so the module
   2225	 * can provide parameter accessor functions of its own.
   2226	 */
   2227	if (mod->init_layout.base)
   2228		flush_icache_range((unsigned long)mod->init_layout.base,
   2229				   (unsigned long)mod->init_layout.base
   2230				   + mod->init_layout.size);
   2231	flush_icache_range((unsigned long)mod->core_layout.base,
   2232			   (unsigned long)mod->core_layout.base + mod->core_layout.size);
   2233}
   2234
   2235int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
   2236				     Elf_Shdr *sechdrs,
   2237				     char *secstrings,
   2238				     struct module *mod)
   2239{
   2240	return 0;
   2241}
   2242
   2243/* module_blacklist is a comma-separated list of module names */
   2244static char *module_blacklist;
   2245static bool blacklisted(const char *module_name)
   2246{
   2247	const char *p;
   2248	size_t len;
   2249
   2250	if (!module_blacklist)
   2251		return false;
   2252
   2253	for (p = module_blacklist; *p; p += len) {
   2254		len = strcspn(p, ",");
   2255		if (strlen(module_name) == len && !memcmp(module_name, p, len))
   2256			return true;
   2257		if (p[len] == ',')
   2258			len++;
   2259	}
   2260	return false;
   2261}
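        /*
         * Illustrative usage: booting with "module_blacklist=nouveau,floppy"
         * on the kernel command line makes blacklisted() match both names,
         * and load_module() then refuses them with -EPERM.
         */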
   2262core_param(module_blacklist, module_blacklist, charp, 0400);
   2263
   2264static struct module *layout_and_allocate(struct load_info *info, int flags)
   2265{
   2266	struct module *mod;
   2267	unsigned int ndx;
   2268	int err;
   2269
   2270	err = check_modinfo(info->mod, info, flags);
   2271	if (err)
   2272		return ERR_PTR(err);
   2273
   2274	/* Allow arches to frob section contents and sizes.  */
   2275	err = module_frob_arch_sections(info->hdr, info->sechdrs,
   2276					info->secstrings, info->mod);
   2277	if (err < 0)
   2278		return ERR_PTR(err);
   2279
   2280	err = module_enforce_rwx_sections(info->hdr, info->sechdrs,
   2281					  info->secstrings, info->mod);
   2282	if (err < 0)
   2283		return ERR_PTR(err);
   2284
   2285	/* We will do a special allocation for per-cpu sections later. */
   2286	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
   2287
   2288	/*
   2289	 * Mark ro_after_init section with SHF_RO_AFTER_INIT so that
   2290	 * layout_sections() can put it in the right place.
   2291	 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
   2292	 */
   2293	ndx = find_sec(info, ".data..ro_after_init");
   2294	if (ndx)
   2295		info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
   2296	/*
   2297	 * Mark the __jump_table section as ro_after_init as well: these data
   2298	 * structures are never modified, with the exception of entries that
   2299	 * refer to code in the __init section, which are annotated as such
   2300	 * at module load time.
   2301	 */
   2302	ndx = find_sec(info, "__jump_table");
   2303	if (ndx)
   2304		info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
   2305
   2306	/*
   2307	 * Determine total sizes, and put offsets in sh_entsize.  For now
   2308	 * this is done generically; there doesn't appear to be any
   2309	 * special cases for the architectures.
   2310	 */
   2311	layout_sections(info->mod, info);
   2312	layout_symtab(info->mod, info);
   2313
   2314	/* Allocate and move to the final place */
   2315	err = move_module(info->mod, info);
   2316	if (err)
   2317		return ERR_PTR(err);
   2318
   2319	/* Module has been copied to its final place now: return it. */
   2320	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
   2321	kmemleak_load_module(mod, info);
   2322	return mod;
   2323}
   2324
   2325/* mod is no longer valid after this! */
   2326static void module_deallocate(struct module *mod, struct load_info *info)
   2327{
   2328	percpu_modfree(mod);
   2329	module_arch_freeing_init(mod);
   2330	module_memfree(mod->init_layout.base);
   2331	module_memfree(mod->core_layout.base);
   2332#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
   2333	vfree(mod->data_layout.base);
   2334#endif
   2335}
   2336
   2337int __weak module_finalize(const Elf_Ehdr *hdr,
   2338			   const Elf_Shdr *sechdrs,
   2339			   struct module *me)
   2340{
   2341	return 0;
   2342}
   2343
   2344static int post_relocation(struct module *mod, const struct load_info *info)
   2345{
   2346	/* Sort exception table now relocations are done. */
   2347	sort_extable(mod->extable, mod->extable + mod->num_exentries);
   2348
   2349	/* Copy relocated percpu area over. */
   2350	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
   2351		       info->sechdrs[info->index.pcpu].sh_size);
   2352
   2353	/* Setup kallsyms-specific fields. */
   2354	add_kallsyms(mod, info);
   2355
   2356	/* Arch-specific module finalizing. */
   2357	return module_finalize(info->hdr, info->sechdrs, mod);
   2358}
   2359
   2360/* Is this module of this name done loading?  No locks held. */
   2361static bool finished_loading(const char *name)
   2362{
   2363	struct module *mod;
   2364	bool ret;
   2365
   2366	/*
   2367	 * The module_mutex should not be a heavily contended lock;
   2368	 * if we get the occasional sleep here, we'll go an extra iteration
   2369	 * in the wait_event_interruptible(), which is harmless.
   2370	 */
   2371	sched_annotate_sleep();
   2372	mutex_lock(&module_mutex);
   2373	mod = find_module_all(name, strlen(name), true);
   2374	ret = !mod || mod->state == MODULE_STATE_LIVE;
   2375	mutex_unlock(&module_mutex);
   2376
   2377	return ret;
   2378}
   2379
   2380/* Call module constructors. */
   2381static void do_mod_ctors(struct module *mod)
   2382{
   2383#ifdef CONFIG_CONSTRUCTORS
   2384	unsigned long i;
   2385
   2386	for (i = 0; i < mod->num_ctors; i++)
   2387		mod->ctors[i]();
   2388#endif
   2389}
   2390
    2391/* For freeing module_init on success, in case kallsyms is still traversing it */
   2392struct mod_initfree {
   2393	struct llist_node node;
   2394	void *module_init;
   2395};
   2396
   2397static void do_free_init(struct work_struct *w)
   2398{
   2399	struct llist_node *pos, *n, *list;
   2400	struct mod_initfree *initfree;
   2401
   2402	list = llist_del_all(&init_free_list);
   2403
   2404	synchronize_rcu();
   2405
   2406	llist_for_each_safe(pos, n, list) {
   2407		initfree = container_of(pos, struct mod_initfree, node);
   2408		module_memfree(initfree->module_init);
   2409		kfree(initfree);
   2410	}
   2411}
   2412
   2413/*
   2414 * This is where the real work happens.
   2415 *
   2416 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
   2417 * helper command 'lx-symbols'.
   2418 */
   2419static noinline int do_init_module(struct module *mod)
   2420{
   2421	int ret = 0;
   2422	struct mod_initfree *freeinit;
   2423
   2424	freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
   2425	if (!freeinit) {
   2426		ret = -ENOMEM;
   2427		goto fail;
   2428	}
   2429	freeinit->module_init = mod->init_layout.base;
   2430
   2431	do_mod_ctors(mod);
   2432	/* Start the module */
   2433	if (mod->init != NULL)
   2434		ret = do_one_initcall(mod->init);
   2435	if (ret < 0) {
   2436		goto fail_free_freeinit;
   2437	}
   2438	if (ret > 0) {
   2439		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
   2440			"follow 0/-E convention\n"
   2441			"%s: loading module anyway...\n",
   2442			__func__, mod->name, ret, __func__);
   2443		dump_stack();
   2444	}
   2445
   2446	/* Now it's a first class citizen! */
   2447	mod->state = MODULE_STATE_LIVE;
   2448	blocking_notifier_call_chain(&module_notify_list,
   2449				     MODULE_STATE_LIVE, mod);
   2450
   2451	/* Delay uevent until module has finished its init routine */
   2452	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
   2453
   2454	/*
   2455	 * We need to finish all async code before the module init sequence
   2456	 * is done. This has potential to deadlock if synchronous module
   2457	 * loading is requested from async (which is not allowed!).
   2458	 *
   2459	 * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous
   2460	 * request_module() from async workers") for more details.
   2461	 */
   2462	if (!mod->async_probe_requested)
   2463		async_synchronize_full();
   2464
   2465	ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
   2466			mod->init_layout.size);
   2467	mutex_lock(&module_mutex);
   2468	/* Drop initial reference. */
   2469	module_put(mod);
   2470	trim_init_extable(mod);
   2471#ifdef CONFIG_KALLSYMS
   2472	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
   2473	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
   2474#endif
   2475	module_enable_ro(mod, true);
   2476	mod_tree_remove_init(mod);
   2477	module_arch_freeing_init(mod);
   2478	mod->init_layout.base = NULL;
   2479	mod->init_layout.size = 0;
   2480	mod->init_layout.ro_size = 0;
   2481	mod->init_layout.ro_after_init_size = 0;
   2482	mod->init_layout.text_size = 0;
   2483#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
   2484	/* .BTF is not SHF_ALLOC and will get removed, so sanitize pointer */
   2485	mod->btf_data = NULL;
   2486#endif
   2487	/*
   2488	 * We want to free module_init, but be aware that kallsyms may be
   2489	 * walking this with preempt disabled.  In all the failure paths, we
   2490	 * call synchronize_rcu(), but we don't want to slow down the success
   2491	 * path. module_memfree() cannot be called in an interrupt, so do the
   2492	 * work and call synchronize_rcu() in a work queue.
   2493	 *
   2494	 * Note that module_alloc() on most architectures creates W+X page
   2495	 * mappings which won't be cleaned up until do_free_init() runs.  Any
    2496	 * code such as mark_rodata_ro() which depends on those mappings
    2497	 * being cleaned up needs to sync with the queued work, i.e.
    2498	 * rcu_barrier().
   2499	 */
   2500	if (llist_add(&freeinit->node, &init_free_list))
   2501		schedule_work(&init_free_wq);
   2502
   2503	mutex_unlock(&module_mutex);
   2504	wake_up_all(&module_wq);
   2505
   2506	return 0;
   2507
   2508fail_free_freeinit:
   2509	kfree(freeinit);
   2510fail:
   2511	/* Try to protect us from buggy refcounters. */
   2512	mod->state = MODULE_STATE_GOING;
   2513	synchronize_rcu();
   2514	module_put(mod);
   2515	blocking_notifier_call_chain(&module_notify_list,
   2516				     MODULE_STATE_GOING, mod);
   2517	klp_module_going(mod);
   2518	ftrace_release_mod(mod);
   2519	free_module(mod);
   2520	wake_up_all(&module_wq);
   2521	return ret;
   2522}
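        /*
         * Illustrative debugging flow for the breakpoint target above: with
         * the kernel's gdb helper scripts loaded, "(gdb) lx-symbols" plants a
         * breakpoint here so symbols of freshly loaded modules are picked up
         * automatically.
         */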
   2523
   2524static int may_init_module(void)
   2525{
   2526	if (!capable(CAP_SYS_MODULE) || modules_disabled)
   2527		return -EPERM;
   2528
   2529	return 0;
   2530}
   2531
   2532/*
    2533 * We try to place the module in the list now to make sure it's unique
    2534 * before we dedicate too many resources; in particular, this avoids
    2535 * exhausting temporary percpu memory on a duplicate load.
   2536 */
   2537static int add_unformed_module(struct module *mod)
   2538{
   2539	int err;
   2540	struct module *old;
   2541
   2542	mod->state = MODULE_STATE_UNFORMED;
   2543
   2544again:
   2545	mutex_lock(&module_mutex);
   2546	old = find_module_all(mod->name, strlen(mod->name), true);
   2547	if (old != NULL) {
   2548		if (old->state != MODULE_STATE_LIVE) {
   2549			/* Wait in case it fails to load. */
   2550			mutex_unlock(&module_mutex);
   2551			err = wait_event_interruptible(module_wq,
   2552					       finished_loading(mod->name));
   2553			if (err)
   2554				goto out_unlocked;
   2555			goto again;
   2556		}
   2557		err = -EEXIST;
   2558		goto out;
   2559	}
   2560	mod_update_bounds(mod);
   2561	list_add_rcu(&mod->list, &modules);
   2562	mod_tree_insert(mod);
   2563	err = 0;
   2564
   2565out:
   2566	mutex_unlock(&module_mutex);
   2567out_unlocked:
   2568	return err;
   2569}
   2570
   2571static int complete_formation(struct module *mod, struct load_info *info)
   2572{
   2573	int err;
   2574
   2575	mutex_lock(&module_mutex);
   2576
   2577	/* Find duplicate symbols (must be called under lock). */
   2578	err = verify_exported_symbols(mod);
   2579	if (err < 0)
   2580		goto out;
   2581
   2582	/* This relies on module_mutex for list integrity. */
   2583	module_bug_finalize(info->hdr, info->sechdrs, mod);
   2584
   2585	if (module_check_misalignment(mod))
   2586		goto out_misaligned;
   2587
   2588	module_enable_ro(mod, false);
   2589	module_enable_nx(mod);
   2590	module_enable_x(mod);
   2591
   2592	/*
   2593	 * Mark state as coming so strong_try_module_get() ignores us,
   2594	 * but kallsyms etc. can see us.
   2595	 */
   2596	mod->state = MODULE_STATE_COMING;
   2597	mutex_unlock(&module_mutex);
   2598
   2599	return 0;
   2600
   2601out_misaligned:
   2602	err = -EINVAL;
   2603out:
   2604	mutex_unlock(&module_mutex);
   2605	return err;
   2606}
   2607
   2608static int prepare_coming_module(struct module *mod)
   2609{
   2610	int err;
   2611
   2612	ftrace_module_enable(mod);
   2613	err = klp_module_coming(mod);
   2614	if (err)
   2615		return err;
   2616
   2617	err = blocking_notifier_call_chain_robust(&module_notify_list,
   2618			MODULE_STATE_COMING, MODULE_STATE_GOING, mod);
   2619	err = notifier_to_errno(err);
   2620	if (err)
   2621		klp_module_going(mod);
   2622
   2623	return err;
   2624}
   2625
   2626static int unknown_module_param_cb(char *param, char *val, const char *modname,
   2627				   void *arg)
   2628{
   2629	struct module *mod = arg;
   2630	int ret;
   2631
   2632	if (strcmp(param, "async_probe") == 0) {
   2633		mod->async_probe_requested = true;
   2634		return 0;
   2635	}
   2636
   2637	/* Check for magic 'dyndbg' arg */
   2638	ret = ddebug_dyndbg_module_param_cb(param, val, modname);
   2639	if (ret != 0)
   2640		pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
   2641	return 0;
   2642}
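        /*
         * Illustrative examples: "modprobe foo async_probe" sets
         * mod->async_probe_requested; "modprobe foo dyndbg=+p" is consumed
         * by ddebug_dyndbg_module_param_cb(); any other unknown parameter is
         * warned about and ignored instead of failing the load.
         */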
   2643
   2644static void cfi_init(struct module *mod);
   2645
   2646/*
   2647 * Allocate and load the module: note that size of section 0 is always
   2648 * zero, and we rely on this for optional sections.
   2649 */
   2650static int load_module(struct load_info *info, const char __user *uargs,
   2651		       int flags)
   2652{
   2653	struct module *mod;
   2654	long err = 0;
   2655	char *after_dashes;
   2656
   2657	/*
   2658	 * Do the signature check (if any) first. All that
   2659	 * the signature check needs is info->len, it does
   2660	 * not need any of the section info. That can be
   2661	 * set up later. This will minimize the chances
   2662	 * of a corrupt module causing problems before
   2663	 * we even get to the signature check.
   2664	 *
   2665	 * The check will also adjust info->len by stripping
   2666	 * off the sig length at the end of the module, making
   2667	 * checks against info->len more correct.
   2668	 */
   2669	err = module_sig_check(info, flags);
   2670	if (err)
   2671		goto free_copy;
   2672
   2673	/*
   2674	 * Do basic sanity checks against the ELF header and
   2675	 * sections.
   2676	 */
   2677	err = elf_validity_check(info);
   2678	if (err)
   2679		goto free_copy;
   2680
   2681	/*
   2682	 * Everything checks out, so set up the section info
   2683	 * in the info structure.
   2684	 */
   2685	err = setup_load_info(info, flags);
   2686	if (err)
   2687		goto free_copy;
   2688
   2689	/*
   2690	 * Now that we know we have the correct module name, check
   2691	 * if it's blacklisted.
   2692	 */
   2693	if (blacklisted(info->name)) {
   2694		err = -EPERM;
   2695		pr_err("Module %s is blacklisted\n", info->name);
   2696		goto free_copy;
   2697	}
   2698
   2699	err = rewrite_section_headers(info, flags);
   2700	if (err)
   2701		goto free_copy;
   2702
   2703	/* Check module struct version now, before we try to use module. */
   2704	if (!check_modstruct_version(info, info->mod)) {
   2705		err = -ENOEXEC;
   2706		goto free_copy;
   2707	}
   2708
   2709	/* Figure out module layout, and allocate all the memory. */
   2710	mod = layout_and_allocate(info, flags);
   2711	if (IS_ERR(mod)) {
   2712		err = PTR_ERR(mod);
   2713		goto free_copy;
   2714	}
   2715
   2716	audit_log_kern_module(mod->name);
   2717
   2718	/* Reserve our place in the list. */
   2719	err = add_unformed_module(mod);
   2720	if (err)
   2721		goto free_module;
   2722
   2723#ifdef CONFIG_MODULE_SIG
   2724	mod->sig_ok = info->sig_ok;
   2725	if (!mod->sig_ok) {
   2726		pr_notice_once("%s: module verification failed: signature "
   2727			       "and/or required key missing - tainting "
   2728			       "kernel\n", mod->name);
   2729		add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
   2730	}
   2731#endif
   2732
   2733	/* To avoid stressing percpu allocator, do this once we're unique. */
   2734	err = percpu_modalloc(mod, info);
   2735	if (err)
   2736		goto unlink_mod;
   2737
   2738	/* Now module is in final location, initialize linked lists, etc. */
   2739	err = module_unload_init(mod);
   2740	if (err)
   2741		goto unlink_mod;
   2742
   2743	init_param_lock(mod);
   2744
   2745	/*
   2746	 * Now we've got everything in the final locations, we can
   2747	 * find optional sections.
   2748	 */
   2749	err = find_module_sections(mod, info);
   2750	if (err)
   2751		goto free_unload;
   2752
   2753	err = check_module_license_and_versions(mod);
   2754	if (err)
   2755		goto free_unload;
   2756
   2757	/* Set up MODINFO_ATTR fields */
   2758	setup_modinfo(mod, info);
   2759
   2760	/* Fix up syms, so that st_value is a pointer to location. */
   2761	err = simplify_symbols(mod, info);
   2762	if (err < 0)
   2763		goto free_modinfo;
   2764
   2765	err = apply_relocations(mod, info);
   2766	if (err < 0)
   2767		goto free_modinfo;
   2768
   2769	err = post_relocation(mod, info);
   2770	if (err < 0)
   2771		goto free_modinfo;
   2772
   2773	flush_module_icache(mod);
   2774
   2775	/* Setup CFI for the module. */
   2776	cfi_init(mod);
   2777
   2778	/* Now copy in args */
   2779	mod->args = strndup_user(uargs, ~0UL >> 1);
   2780	if (IS_ERR(mod->args)) {
   2781		err = PTR_ERR(mod->args);
   2782		goto free_arch_cleanup;
   2783	}
   2784
   2785	init_build_id(mod, info);
   2786	dynamic_debug_setup(mod, info->debug, info->num_debug);
   2787
   2788	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
   2789	ftrace_module_init(mod);
   2790
   2791	/* Finally it's fully formed, ready to start executing. */
   2792	err = complete_formation(mod, info);
   2793	if (err)
   2794		goto ddebug_cleanup;
   2795
   2796	err = prepare_coming_module(mod);
   2797	if (err)
   2798		goto bug_cleanup;
   2799
   2800	/* Module is ready to execute: parsing args may do that. */
   2801	after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
   2802				  -32768, 32767, mod,
   2803				  unknown_module_param_cb);
   2804	if (IS_ERR(after_dashes)) {
   2805		err = PTR_ERR(after_dashes);
   2806		goto coming_cleanup;
   2807	} else if (after_dashes) {
   2808		pr_warn("%s: parameters '%s' after `--' ignored\n",
   2809		       mod->name, after_dashes);
   2810	}
   2811
   2812	/* Link in to sysfs. */
   2813	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
   2814	if (err < 0)
   2815		goto coming_cleanup;
   2816
   2817	if (is_livepatch_module(mod)) {
   2818		err = copy_module_elf(mod, info);
   2819		if (err < 0)
   2820			goto sysfs_cleanup;
   2821	}
   2822
   2823	/* Get rid of temporary copy. */
   2824	free_copy(info, flags);
   2825
   2826	/* Done! */
   2827	trace_module_load(mod);
   2828
   2829	return do_init_module(mod);
   2830
   2831 sysfs_cleanup:
   2832	mod_sysfs_teardown(mod);
   2833 coming_cleanup:
   2834	mod->state = MODULE_STATE_GOING;
   2835	destroy_params(mod->kp, mod->num_kp);
   2836	blocking_notifier_call_chain(&module_notify_list,
   2837				     MODULE_STATE_GOING, mod);
   2838	klp_module_going(mod);
   2839 bug_cleanup:
   2840	mod->state = MODULE_STATE_GOING;
   2841	/* module_bug_cleanup needs module_mutex protection */
   2842	mutex_lock(&module_mutex);
   2843	module_bug_cleanup(mod);
   2844	mutex_unlock(&module_mutex);
   2845
   2846 ddebug_cleanup:
   2847	ftrace_release_mod(mod);
   2848	dynamic_debug_remove(mod, info->debug);
   2849	synchronize_rcu();
   2850	kfree(mod->args);
   2851 free_arch_cleanup:
   2852	cfi_cleanup(mod);
   2853	module_arch_cleanup(mod);
   2854 free_modinfo:
   2855	free_modinfo(mod);
   2856 free_unload:
   2857	module_unload_free(mod);
   2858 unlink_mod:
   2859	mutex_lock(&module_mutex);
   2860	/* Unlink carefully: kallsyms could be walking list. */
   2861	list_del_rcu(&mod->list);
   2862	mod_tree_remove(mod);
   2863	wake_up_all(&module_wq);
   2864	/* Wait for RCU-sched synchronizing before releasing mod->list. */
   2865	synchronize_rcu();
   2866	mutex_unlock(&module_mutex);
   2867 free_module:
   2868	/* Free lock-classes; relies on the preceding sync_rcu() */
   2869	lockdep_free_key_range(mod->data_layout.base, mod->data_layout.size);
   2870
   2871	module_deallocate(mod, info);
   2872 free_copy:
   2873	free_copy(info, flags);
   2874	return err;
   2875}
   2876
   2877SYSCALL_DEFINE3(init_module, void __user *, umod,
   2878		unsigned long, len, const char __user *, uargs)
   2879{
   2880	int err;
   2881	struct load_info info = { };
   2882
   2883	err = may_init_module();
   2884	if (err)
   2885		return err;
   2886
   2887	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
   2888	       umod, len, uargs);
   2889
   2890	err = copy_module_from_user(umod, len, &info);
   2891	if (err)
   2892		return err;
   2893
   2894	return load_module(&info, uargs, 0);
   2895}
   2896
   2897SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
   2898{
   2899	struct load_info info = { };
   2900	void *buf = NULL;
   2901	int len;
   2902	int err;
   2903
   2904	err = may_init_module();
   2905	if (err)
   2906		return err;
   2907
   2908	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
   2909
   2910	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
   2911		      |MODULE_INIT_IGNORE_VERMAGIC
   2912		      |MODULE_INIT_COMPRESSED_FILE))
   2913		return -EINVAL;
   2914
   2915	len = kernel_read_file_from_fd(fd, 0, &buf, INT_MAX, NULL,
   2916				       READING_MODULE);
   2917	if (len < 0)
   2918		return len;
   2919
   2920	if (flags & MODULE_INIT_COMPRESSED_FILE) {
   2921		err = module_decompress(&info, buf, len);
   2922		vfree(buf); /* compressed data is no longer needed */
   2923		if (err)
   2924			return err;
   2925	} else {
   2926		info.hdr = buf;
   2927		info.len = len;
   2928	}
   2929
   2930	return load_module(&info, uargs, flags);
   2931}
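        /*
         * Minimal userspace sketch (illustrative, not part of this file):
         *
         *	int fd = open("foo.ko", O_RDONLY | O_CLOEXEC);
         *	if (syscall(SYS_finit_module, fd, "", 0) != 0)
         *		perror("finit_module");
         *
         * Loading a compressed image (e.g. foo.ko.xz) would instead pass
         * MODULE_INIT_COMPRESSED_FILE in flags, taking the
         * module_decompress() path above.
         */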
   2932
   2933static inline int within(unsigned long addr, void *start, unsigned long size)
   2934{
   2935	return ((void *)addr >= start && (void *)addr < start + size);
   2936}
   2937
   2938static void cfi_init(struct module *mod)
   2939{
   2940#ifdef CONFIG_CFI_CLANG
   2941	initcall_t *init;
   2942	exitcall_t *exit;
   2943
   2944	rcu_read_lock_sched();
   2945	mod->cfi_check = (cfi_check_fn)
   2946		find_kallsyms_symbol_value(mod, "__cfi_check");
   2947	init = (initcall_t *)
   2948		find_kallsyms_symbol_value(mod, "__cfi_jt_init_module");
   2949	exit = (exitcall_t *)
   2950		find_kallsyms_symbol_value(mod, "__cfi_jt_cleanup_module");
   2951	rcu_read_unlock_sched();
   2952
   2953	/* Fix init/exit functions to point to the CFI jump table */
   2954	if (init)
   2955		mod->init = *init;
   2956#ifdef CONFIG_MODULE_UNLOAD
   2957	if (exit)
   2958		mod->exit = *exit;
   2959#endif
   2960
   2961	cfi_module_add(mod, mod_tree.addr_min);
   2962#endif
   2963}
   2964
   2965static void cfi_cleanup(struct module *mod)
   2966{
   2967#ifdef CONFIG_CFI_CLANG
   2968	cfi_module_remove(mod, mod_tree.addr_min);
   2969#endif
   2970}
   2971
   2972/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
   2973char *module_flags(struct module *mod, char *buf)
   2974{
   2975	int bx = 0;
   2976
   2977	BUG_ON(mod->state == MODULE_STATE_UNFORMED);
   2978	if (mod->taints ||
   2979	    mod->state == MODULE_STATE_GOING ||
   2980	    mod->state == MODULE_STATE_COMING) {
   2981		buf[bx++] = '(';
   2982		bx += module_flags_taint(mod->taints, buf + bx);
   2983		/* Show a - for module-is-being-unloaded */
   2984		if (mod->state == MODULE_STATE_GOING)
   2985			buf[bx++] = '-';
   2986		/* Show a + for module-is-being-loaded */
   2987		if (mod->state == MODULE_STATE_COMING)
   2988			buf[bx++] = '+';
   2989		buf[bx++] = ')';
   2990	}
   2991	buf[bx] = '\0';
   2992
   2993	return buf;
   2994}
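        /*
         * Example (illustrative): an out-of-tree, unsigned module that is
         * still running its init would show up in /proc/modules with flags
         * like "(OE+)": taint characters first, then '-' or '+' for
         * going/coming.
         */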
   2995
   2996/* Given an address, look for it in the module exception tables. */
   2997const struct exception_table_entry *search_module_extables(unsigned long addr)
   2998{
   2999	const struct exception_table_entry *e = NULL;
   3000	struct module *mod;
   3001
   3002	preempt_disable();
   3003	mod = __module_address(addr);
   3004	if (!mod)
   3005		goto out;
   3006
   3007	if (!mod->num_exentries)
   3008		goto out;
   3009
   3010	e = search_extable(mod->extable,
   3011			   mod->num_exentries,
   3012			   addr);
   3013out:
   3014	preempt_enable();
   3015
   3016	/*
   3017	 * Now, if we found one, we are running inside it now, hence
   3018	 * we cannot unload the module, hence no refcnt needed.
   3019	 */
   3020	return e;
   3021}
   3022
   3023/**
   3024 * is_module_address() - is this address inside a module?
   3025 * @addr: the address to check.
   3026 *
   3027 * See is_module_text_address() if you simply want to see if the address
   3028 * is code (not data).
   3029 */
   3030bool is_module_address(unsigned long addr)
   3031{
   3032	bool ret;
   3033
   3034	preempt_disable();
   3035	ret = __module_address(addr) != NULL;
   3036	preempt_enable();
   3037
   3038	return ret;
   3039}
   3040
   3041/**
   3042 * __module_address() - get the module which contains an address.
   3043 * @addr: the address.
   3044 *
   3045 * Must be called with preempt disabled or module mutex held so that
   3046 * module doesn't get freed during this.
   3047 */
   3048struct module *__module_address(unsigned long addr)
   3049{
   3050	struct module *mod;
   3051	struct mod_tree_root *tree;
   3052
   3053	if (addr >= mod_tree.addr_min && addr <= mod_tree.addr_max)
   3054		tree = &mod_tree;
   3055#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
   3056	else if (addr >= mod_data_tree.addr_min && addr <= mod_data_tree.addr_max)
   3057		tree = &mod_data_tree;
   3058#endif
   3059	else
   3060		return NULL;
   3061
   3062	module_assert_mutex_or_preempt();
   3063
   3064	mod = mod_find(addr, tree);
   3065	if (mod) {
   3066		BUG_ON(!within_module(addr, mod));
   3067		if (mod->state == MODULE_STATE_UNFORMED)
   3068			mod = NULL;
   3069	}
   3070	return mod;
   3071}
   3072
   3073/**
   3074 * is_module_text_address() - is this address inside module code?
   3075 * @addr: the address to check.
   3076 *
   3077 * See is_module_address() if you simply want to see if the address is
   3078 * anywhere in a module.  See kernel_text_address() for testing if an
   3079 * address corresponds to kernel or module code.
   3080 */
   3081bool is_module_text_address(unsigned long addr)
   3082{
   3083	bool ret;
   3084
   3085	preempt_disable();
   3086	ret = __module_text_address(addr) != NULL;
   3087	preempt_enable();
   3088
   3089	return ret;
   3090}
   3091
   3092/**
   3093 * __module_text_address() - get the module whose code contains an address.
   3094 * @addr: the address.
   3095 *
   3096 * Must be called with preempt disabled or module mutex held so that
   3097 * module doesn't get freed during this.
   3098 */
   3099struct module *__module_text_address(unsigned long addr)
   3100{
   3101	struct module *mod = __module_address(addr);
   3102	if (mod) {
   3103		/* Make sure it's within the text section. */
   3104		if (!within(addr, mod->init_layout.base, mod->init_layout.text_size)
   3105		    && !within(addr, mod->core_layout.base, mod->core_layout.text_size))
   3106			mod = NULL;
   3107	}
   3108	return mod;
   3109}
   3110
   3111/* Don't grab lock, we're oopsing. */
   3112void print_modules(void)
   3113{
   3114	struct module *mod;
   3115	char buf[MODULE_FLAGS_BUF_SIZE];
   3116
   3117	printk(KERN_DEFAULT "Modules linked in:");
   3118	/* Most callers should already have preempt disabled, but make sure */
   3119	preempt_disable();
   3120	list_for_each_entry_rcu(mod, &modules, list) {
   3121		if (mod->state == MODULE_STATE_UNFORMED)
   3122			continue;
   3123		pr_cont(" %s%s", mod->name, module_flags(mod, buf));
   3124	}
   3125
   3126	print_unloaded_tainted_modules();
   3127	preempt_enable();
   3128	if (last_unloaded_module[0])
   3129		pr_cont(" [last unloaded: %s]", last_unloaded_module);
   3130	pr_cont("\n");
   3131}