cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vdso.c (7322B)
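This appears to be the RISC-V vDSO mapping code (upstream arch/riscv/kernel/vdso.c): it installs the [vdso] text and [vvar] data mappings into each user process and handles time-namespace page swapping.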


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
 */

#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
#include <asm/vdso.h>
#include <linux/time_namespace.h>

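/*
 * With CONFIG_GENERIC_TIME_VSYSCALL the real vdso_data layout comes from
 * the generic vDSO datapage; without it, an empty stub keeps the code
 * below compiling and the data page simply carries no time data.
 */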
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#include <vdso/datapage.h>
#else
struct vdso_data {
};
#endif

extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT
extern char compat_vdso_start[], compat_vdso_end[];
#endif

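/*
 * Layout of the vvar region, in pages. Page 0 holds the vdso_data read
 * by userspace time functions; page 1 is only populated for tasks in a
 * time namespace (see vvar_fault() below, which swaps the two pages in
 * that case).
 */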
enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

enum rv_vdso_map {
	RV_VDSO_MAP_VVAR,
	RV_VDSO_MAP_VDSO,
};

#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)

/*
 * The vDSO data page. The union pads struct vdso_data out to a full
 * page so that a whole page can be mapped into userspace.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

struct __vdso_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

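/*
 * mremap() hook for the [vdso] mapping: keep mm->context.vdso pointing
 * at the new base so the kernel (e.g. signal delivery returning through
 * the vDSO trampoline) can still locate it after userspace moves it.
 */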
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

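/*
 * One-time setup per vDSO image: sanity-check the ELF magic of the
 * kernel-embedded vDSO blob, then build the struct page list that
 * _install_special_mapping() will use for the code mapping.
 */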
static void __init __vdso_init(struct __vdso_info *vdso_info)
{
	unsigned int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info->vdso_code_start, "\177ELF", 4))
		panic("vDSO is not a valid ELF object!\n");

	vdso_info->vdso_pages = (
		vdso_info->vdso_code_end -
		vdso_info->vdso_code_start) >>
		PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info->vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		panic("vDSO kcalloc failed!\n");

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info->vdso_code_start);

	for (i = 0; i < vdso_info->vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info->cm->pages = vdso_pagelist;
}

#ifdef CONFIG_TIME_NS
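/*
 * Hook for the generic time-namespace code: a vvar page here starts
 * directly with its vdso_data, so the cast is the whole translation.
 */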
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	struct __vdso_info *vdso_info = mm->context.vdso_info;

	mmap_read_lock(mm);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, vdso_info->dm))
			zap_page_range(vma, vma->vm_start, size);
	}

	mmap_read_unlock(mm);
	return 0;
}

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect the .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops.
	 * For more details see check_vma_flags() and __access_remote_vm().
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

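/*
 * The vvar pages are inserted lazily by this fault handler rather than
 * at mmap time. For a task inside a time namespace the namespace's page
 * is mapped at VVAR_DATA_PAGE_OFFSET and the real vdso_data at
 * VVAR_TIMENS_PAGE_OFFSET, which is how the vDSO discovers the
 * namespace offsets it must apply.
 */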
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
	[RV_VDSO_MAP_VVAR] = {
		.name   = "[vvar]",
		.fault = vvar_fault,
	},
	[RV_VDSO_MAP_VDSO] = {
		.name   = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static struct __vdso_info vdso_info __ro_after_init = {
	.name = "vdso",
	.vdso_code_start = vdso_start,
	.vdso_code_end = vdso_end,
	.dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR],
	.cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO],
};

#ifdef CONFIG_COMPAT
static struct vm_special_mapping rv_compat_vdso_maps[] __ro_after_init = {
	[RV_VDSO_MAP_VVAR] = {
		.name   = "[vvar]",
		.fault = vvar_fault,
	},
	[RV_VDSO_MAP_VDSO] = {
		.name   = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static struct __vdso_info compat_vdso_info __ro_after_init = {
	.name = "compat_vdso",
	.vdso_code_start = compat_vdso_start,
	.vdso_code_end = compat_vdso_end,
	.dm = &rv_compat_vdso_maps[RV_VDSO_MAP_VVAR],
	.cm = &rv_compat_vdso_maps[RV_VDSO_MAP_VDSO],
};
#endif

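/*
 * arch_initcall() runs this once during boot, before the first user
 * process is exec'd, so the page lists are ready by the time the binfmt
 * loader calls arch_setup_additional_pages() below.
 */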
static int __init vdso_init(void)
{
	__vdso_init(&vdso_info);
#ifdef CONFIG_COMPAT
	__vdso_init(&compat_vdso_info);
#endif

	return 0;
}
arch_initcall(vdso_init);

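/*
 * Map [vvar] followed directly by [vdso] at a kernel-chosen address.
 * The data pages are VM_PFNMAP with a fault handler (see vvar_fault());
 * the text is VM_MAYWRITE so a debugger can plant breakpoints through
 * ptrace, which COWs a private copy instead of touching the shared
 * vDSO pages.
 */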
static int __setup_additional_pages(struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp,
				    struct __vdso_info *vdso_info)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info->vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VVAR_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE,
		(VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info->dm);
	if (IS_ERR(ret))
		goto up_fail;

	vdso_base += VVAR_SIZE;
	mm->context.vdso = (void *)vdso_base;
	mm->context.vdso_info = (void *)vdso_info;

	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_info->cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp,
				       &compat_vdso_info);
	mmap_write_unlock(mm);

	return ret;
}
#endif

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso_info);
	mmap_write_unlock(mm);

	return ret;
}