cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

kcore.c (16768B)
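
The file below is fs/proc/kcore.c, the kernel's ELF core dumper: it exposes /proc/kcore, a virtual ELF core image of the running kernel's memory. A typical consumer is a debugger, e.g. gdb vmlinux /proc/kcore.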


// SPDX-License-Identifier: GPL-2.0
/*
 *	fs/proc/kcore.c kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/crash_core.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/notifier.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <asm/sections.h>
#include "internal.h"

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;

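/*
 * These helpers map a kernel virtual address to its offset in the synthetic
 * ELF image and back. By default the mapping is relative to PAGE_OFFSET,
 * i.e. the start of the direct map; the #ifndef guards let architectures
 * provide their own mapping.
 */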
#ifndef kc_vaddr_to_offset
#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef	kc_offset_to_vaddr
#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * Same as oldmem_pfn_is_ram in vmcore
 */
static int (*mem_pfn_is_ram)(unsigned long pfn);

int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (mem_pfn_is_ram)
		return -EBUSY;
	mem_pfn_is_ram = fn;
	return 0;
}

static int pfn_is_ram(unsigned long pfn)
{
	if (mem_pfn_is_ram)
		return mem_pfn_is_ram(pfn);
	else
		return 1;
}

/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
		       int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	list_add_tail(&new->list, &kclist_head);
}

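/*
 * Compute the overall size of the /proc/kcore image: one PT_NOTE program
 * header plus one per kclist entry, the note segment, and the page-aligned
 * data area, whose extent is the highest kc_vaddr_to_offset() of any entry.
 */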
static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
			     size_t *data_offset)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}

	*phdrs_len = *nphdr * sizeof(struct elf_phdr);
	*notes_len = (4 * sizeof(struct elf_note) +
		      3 * ALIGN(sizeof(CORE_STR), 4) +
		      VMCOREINFO_NOTE_NAME_BYTES +
		      ALIGN(sizeof(struct elf_prstatus), 4) +
		      ALIGN(sizeof(struct elf_prpsinfo), 4) +
		      ALIGN(arch_task_struct_size, 4) +
		      ALIGN(vmcoreinfo_size, 4));
	*data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
				  *notes_len);
	return *data_offset + size;
}

#ifdef CONFIG_HIGHMEM
/*
 * With CONFIG_HIGHMEM, register low memory [0...max_low_pfn) as one
 * continuous range: the memory holes there are not as big as in the
 * !HIGHMEM case. (HIGHMEM is special because part of memory is
 * _invisible_ to the kernel.)
 */
static int kcore_ram_list(struct list_head *head)
{
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, head);
	return 0;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate vmemmap's address from given system ram pfn and register it */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;

	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = PAGE_ALIGN(end);
	/* overlap check (needed because we page-align the range) */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	return 1;
}

#endif

static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;
	struct page *p;

	if (!pfn_valid(pfn))
		return 1;

	p = pfn_to_page(pfn);

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)page_to_virt(p);
	ent->size = nr_pages << PAGE_SHIFT;

	if (!virt_addr_valid(ent->addr))
		goto free_out;

	/* cut off the not-mapped area (taken from the ppc-32 code) */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/*
	 * We've already checked virt_addr_valid so we know this address
	 * is a valid pointer, therefore we can check against it to determine
	 * if we need to trim
	 */
	if (VMALLOC_START > ent->addr) {
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}

static int kcore_ram_list(struct list_head *list)
{
	int nid, ret;
	unsigned long end_pfn;

	/* Not initialized yet; update now: find the max pfn */
	end_pfn = 0;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long node_end;
		node_end = node_end_pfn(nid);
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
	if (ret)
		return -ENOMEM;
	return 0;
}
#endif /* CONFIG_HIGHMEM */

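/*
 * Rebuild the RAM-backed portion of the kclist: drop all existing KCORE_RAM
 * and KCORE_VMEMMAP entries, splice in a freshly built list, and refresh the
 * advertised size of /proc/kcore. Replaced entries are freed after dropping
 * kclist_lock.
 */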
static int kcore_update_ram(void)
{
	LIST_HEAD(list);
	LIST_HEAD(garbage);
	int nphdr;
	size_t phdrs_len, notes_len, data_offset;
	struct kcore_list *tmp, *pos;
	int ret = 0;

	down_write(&kclist_lock);
	if (!xchg(&kcore_need_update, 0))
		goto out;

	ret = kcore_ram_list(&list);
	if (ret) {
		/* Couldn't get the RAM list, try again next time. */
		WRITE_ONCE(kcore_need_update, 1);
		list_splice_tail(&list, &garbage);
		goto out;
	}

	list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
		if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
			list_move(&pos->list, &garbage);
	}
	list_splice_tail(&list, &kclist_head);

	proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
					       &data_offset);

out:
	up_write(&kclist_lock);
	list_for_each_entry_safe(pos, tmp, &garbage, list) {
		list_del(&pos->list);
		kfree(pos);
	}
	return ret;
}

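/*
 * Emit one ELF note: header, NUL-terminated name, then descriptor, with the
 * name and descriptor each padded to 4-byte alignment as the ELF format
 * requires. *i is the running offset into the notes buffer.
 */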
static void append_kcore_note(char *notes, size_t *i, const char *name,
			      unsigned int type, const void *desc,
			      size_t descsz)
{
	struct elf_note *note = (struct elf_note *)&notes[*i];

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = descsz;
	note->n_type = type;
	*i += sizeof(*note);
	memcpy(&notes[*i], name, note->n_namesz);
	*i = ALIGN(*i + note->n_namesz, 4);
	memcpy(&notes[*i], desc, descsz);
	*i = ALIGN(*i + descsz, 4);
}

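/*
 * read_kcore() synthesizes the ELF image on the fly, in file-offset order:
 * the ELF header first, then the program headers, then the note segment,
 * and finally the memory contents, located by translating the file offset
 * back to a kernel virtual address and searching the kclist.
 */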
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
	char *buf = file->private_data;
	size_t phdrs_offset, notes_offset, data_offset;
	size_t page_offline_frozen = 1;
	size_t phdrs_len, notes_len;
	struct kcore_list *m;
	size_t tsz;
	int nphdr;
	unsigned long start;
	size_t orig_buflen = buflen;
	int ret = 0;

	down_read(&kclist_lock);
	/*
	 * Don't race against drivers that set PageOffline() and expect no
	 * further page access.
	 */
	page_offline_freeze();

	get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
	phdrs_offset = sizeof(struct elfhdr);
	notes_offset = phdrs_offset + phdrs_len;

	/* ELF file header. */
	if (buflen && *fpos < sizeof(struct elfhdr)) {
		struct elfhdr ehdr = {
			.e_ident = {
				[EI_MAG0] = ELFMAG0,
				[EI_MAG1] = ELFMAG1,
				[EI_MAG2] = ELFMAG2,
				[EI_MAG3] = ELFMAG3,
				[EI_CLASS] = ELF_CLASS,
				[EI_DATA] = ELF_DATA,
				[EI_VERSION] = EV_CURRENT,
				[EI_OSABI] = ELF_OSABI,
			},
			.e_type = ET_CORE,
			.e_machine = ELF_ARCH,
			.e_version = EV_CURRENT,
			.e_phoff = sizeof(struct elfhdr),
			.e_flags = ELF_CORE_EFLAGS,
			.e_ehsize = sizeof(struct elfhdr),
			.e_phentsize = sizeof(struct elf_phdr),
			.e_phnum = nphdr,
		};

		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
		if (copy_to_user(buffer, (char *)&ehdr + *fpos, tsz)) {
			ret = -EFAULT;
			goto out;
		}

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF program headers. */
	if (buflen && *fpos < phdrs_offset + phdrs_len) {
		struct elf_phdr *phdrs, *phdr;

		phdrs = kzalloc(phdrs_len, GFP_KERNEL);
		if (!phdrs) {
			ret = -ENOMEM;
			goto out;
		}

		phdrs[0].p_type = PT_NOTE;
		phdrs[0].p_offset = notes_offset;
		phdrs[0].p_filesz = notes_len;

		phdr = &phdrs[1];
		list_for_each_entry(m, &kclist_head, list) {
			phdr->p_type = PT_LOAD;
			phdr->p_flags = PF_R | PF_W | PF_X;
			phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
			phdr->p_vaddr = (size_t)m->addr;
			if (m->type == KCORE_RAM)
				phdr->p_paddr = __pa(m->addr);
			else if (m->type == KCORE_TEXT)
				phdr->p_paddr = __pa_symbol(m->addr);
			else
				phdr->p_paddr = (elf_addr_t)-1;
			phdr->p_filesz = phdr->p_memsz = m->size;
			phdr->p_align = PAGE_SIZE;
			phdr++;
		}

		tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
		if (copy_to_user(buffer, (char *)phdrs + *fpos - phdrs_offset,
				 tsz)) {
			kfree(phdrs);
			ret = -EFAULT;
			goto out;
		}
		kfree(phdrs);

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF note segment. */
	if (buflen && *fpos < notes_offset + notes_len) {
		struct elf_prstatus prstatus = {};
		struct elf_prpsinfo prpsinfo = {
			.pr_sname = 'R',
			.pr_fname = "vmlinux",
		};
		char *notes;
		size_t i = 0;

		strlcpy(prpsinfo.pr_psargs, saved_command_line,
			sizeof(prpsinfo.pr_psargs));

		notes = kzalloc(notes_len, GFP_KERNEL);
		if (!notes) {
			ret = -ENOMEM;
			goto out;
		}

		append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
				  sizeof(prstatus));
		append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
				  sizeof(prpsinfo));
		append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
				  arch_task_struct_size);
		/*
		 * vmcoreinfo_size is mostly constant after init time, but it
		 * can be changed by crash_save_vmcoreinfo(). Racing here with a
		 * panic on another CPU before the machine goes down is insanely
		 * unlikely, but it's better to not leave potential buffer
		 * overflows lying around, regardless.
		 */
		append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
				  vmcoreinfo_data,
				  min(vmcoreinfo_size, notes_len - i));

		tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
		if (copy_to_user(buffer, notes + *fpos - notes_offset, tsz)) {
			kfree(notes);
			ret = -EFAULT;
			goto out;
		}
		kfree(notes);

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - data_offset);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	m = NULL;
	while (buflen) {
		struct page *page;
		unsigned long pfn;

		/*
		 * If this is the first iteration or the address is not within
		 * the previous entry, search for a matching entry.
		 */
		if (!m || start < m->addr || start >= m->addr + m->size) {
			struct kcore_list *iter;

			m = NULL;
			list_for_each_entry(iter, &kclist_head, list) {
				if (start >= iter->addr &&
				    start < iter->addr + iter->size) {
					m = iter;
					break;
				}
			}
		}

		if (page_offline_frozen++ % MAX_ORDER_NR_PAGES == 0) {
			page_offline_thaw();
			cond_resched();
			page_offline_freeze();
		}

		if (!m) {
			if (clear_user(buffer, tsz)) {
				ret = -EFAULT;
				goto out;
			}
			goto skip;
		}

		switch (m->type) {
		case KCORE_VMALLOC:
			vread(buf, (char *)start, tsz);
			/* the user buffer must be filled (with zeroes) even if nothing could be read */
			if (copy_to_user(buffer, buf, tsz)) {
				ret = -EFAULT;
				goto out;
			}
			break;
		case KCORE_USER:
			/* User page is handled prior to normal kernel page: */
			if (copy_to_user(buffer, (char *)start, tsz)) {
				ret = -EFAULT;
				goto out;
			}
			break;
		case KCORE_RAM:
			pfn = __pa(start) >> PAGE_SHIFT;
			page = pfn_to_online_page(pfn);

			/*
			 * Don't read offline sections, logically offline pages
			 * (e.g., inflated in a balloon), hwpoisoned pages,
			 * and explicitly excluded physical ranges.
			 */
			if (!page || PageOffline(page) ||
			    is_page_hwpoison(page) || !pfn_is_ram(pfn)) {
				if (clear_user(buffer, tsz)) {
					ret = -EFAULT;
					goto out;
				}
				break;
			}
			fallthrough;
		case KCORE_VMEMMAP:
		case KCORE_TEXT:
			if (kern_addr_valid(start)) {
				/*
				 * Using bounce buffer to bypass the
				 * hardened user copy kernel text checks.
				 */
				if (copy_from_kernel_nofault(buf, (void *)start,
						tsz)) {
					if (clear_user(buffer, tsz)) {
						ret = -EFAULT;
						goto out;
					}
				} else {
					if (copy_to_user(buffer, buf, tsz)) {
						ret = -EFAULT;
						goto out;
					}
				}
			} else {
				if (clear_user(buffer, tsz)) {
					ret = -EFAULT;
					goto out;
				}
			}
			break;
		default:
			pr_warn_once("Unhandled KCORE type: %d\n", m->type);
			if (clear_user(buffer, tsz)) {
				ret = -EFAULT;
				goto out;
			}
		}
skip:
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

out:
	page_offline_thaw();
	up_read(&kclist_lock);
	if (ret)
		return ret;
	return orig_buflen - buflen;
}

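/*
 * Opening /proc/kcore requires CAP_SYS_RAWIO and is refused when the kernel
 * is locked down. The PAGE_SIZE allocation is the per-open bounce buffer
 * used by read_kcore() above.
 */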
static int open_kcore(struct inode *inode, struct file *filp)
{
	int ret = security_locked_down(LOCKDOWN_KCORE);

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (ret)
		return ret;

	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;

	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		inode_lock(inode);
		i_size_write(inode, proc_root_kcore->size);
		inode_unlock(inode);
	}
	return 0;
}

static int release_kcore(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct proc_ops kcore_proc_ops = {
	.proc_read	= read_kcore,
	.proc_open	= open_kcore,
	.proc_release	= release_kcore,
	.proc_lseek	= default_llseek,
};

/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		kcore_need_update = 1;
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kcore_callback_nb __meminitdata = {
	.notifier_call = kcore_callback,
	.priority = 0,
};

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, a special segment is used for mapping the kernel text instead
 * of the direct-map area, so we need to create a dedicated TEXT entry.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * The modules range only needs its own entry when it does not coincide
 * with the vmalloc range.
 */
static struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
	if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
		kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
	}
}
#else
static void __init add_modules_range(void)
{
}
#endif

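/*
 * Register /proc/kcore (readable by root only) and seed the kclist with the
 * kernel text, vmalloc, and module areas; kcore_update_ram() then adds the
 * direct-map RAM entries, and a memory hotplug notifier keeps them up to
 * date.
 */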
static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
	if (!proc_root_kcore) {
		pr_err("couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	register_hotmemory_notifier(&kcore_callback_nb);

	return 0;
}
fs_initcall(proc_kcore_init);
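
For orientation, here is a minimal userspace sketch of how a consumer sees this interface: it reads back the ELF file header that read_kcore() synthesizes. This example is not part of the kernel tree; it assumes an LP64 Linux system, the libc <elf.h> header, and a process with CAP_SYS_RAWIO (see open_kcore() above).

/* kcore_ehdr.c - hypothetical example, not part of fs/proc/kcore.c */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	Elf64_Ehdr ehdr;
	int fd = open("/proc/kcore", O_RDONLY);

	if (fd < 0 || read(fd, &ehdr, sizeof(ehdr)) != (ssize_t)sizeof(ehdr)) {
		perror("/proc/kcore");
		return 1;
	}
	/* e_type should be ET_CORE; e_phnum counts one PT_NOTE header plus
	 * one PT_LOAD per kclist entry, matching get_kcore_size(). */
	printf("e_type=%u e_phnum=%u e_phoff=%llu\n",
	       (unsigned)ehdr.e_type, (unsigned)ehdr.e_phnum,
	       (unsigned long long)ehdr.e_phoff);
	close(fd);
	return 0;
}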