cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vdso.c (5979B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Adapted from arm64 version.
 *
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Mentor Graphics Corporation.
 */

#include <linux/cache.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <asm/arch_timer.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
#include <clocksource/arm_arch_timer.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#define MAX_SYMNAME	64

static struct page **vdso_text_pagelist;

extern char vdso_start[], vdso_end[];

/* Total number of pages needed for the data and text portions of the VDSO. */
unsigned int vdso_total_pages __ro_after_init;

/*
 * The VDSO data page.
 */
static union vdso_data_store vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

static struct page *vdso_data_page __ro_after_init;
static const struct vm_special_mapping vdso_data_mapping = {
	.name = "[vvar]",
	.pages = &vdso_data_page,
};

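/*
 * Called when userspace moves the [vdso] mapping (e.g. during
 * checkpoint/restore); record the new base address in mm->context.vdso.
 */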
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = new_vma->vm_start;

	return 0;
}

static struct vm_special_mapping vdso_text_mapping __ro_after_init = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

struct elfinfo {
	Elf32_Ehdr	*hdr;		/* ptr to ELF */
	Elf32_Sym	*dynsym;	/* ptr to .dynsym section */
	unsigned long	dynsymsize;	/* size of .dynsym section */
	char		*dynstr;	/* ptr to .dynstr section */
};

/* Cached result of boot-time check for whether the arch timer exists,
 * and if so, whether the virtual counter is useable.
 */
bool cntvct_ok __ro_after_init;

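/*
 * The vdso fast paths can only read the virtual counter directly if an
 * architected timer is present and firmware has configured the CPU
 * timer registers.
 */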
static bool __init cntvct_functional(void)
{
	struct device_node *np;
	bool ret = false;

	if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
		goto out;

	/* The arm_arch_timer core should export
	 * arch_timer_use_virtual or similar so we don't have to do
	 * this.
	 */
	np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
	if (!np)
		np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer");
	if (!np)
		goto out_put;

	if (of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		goto out_put;

	ret = true;

out_put:
	of_node_put(np);
out:
	return ret;
}

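/*
 * Return a pointer to the named ELF section inside the vdso image and,
 * if requested, its size.  Returns NULL if the section is not present.
 */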
static void * __init find_section(Elf32_Ehdr *ehdr, const char *name,
				  unsigned long *size)
{
	Elf32_Shdr *sechdrs;
	unsigned int i;
	char *secnames;

	/* Grab section headers and strings so we can tell who is who */
	sechdrs = (void *)ehdr + ehdr->e_shoff;
	secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;

	/* Find the section they want */
	for (i = 1; i < ehdr->e_shnum; i++) {
		if (strcmp(secnames + sechdrs[i].sh_name, name) == 0) {
			if (size)
				*size = sechdrs[i].sh_size;
			return (void *)ehdr + sechdrs[i].sh_offset;
		}
	}

	if (size)
		*size = 0;
	return NULL;
}

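/*
 * Look up a symbol in the vdso's .dynsym by name, ignoring any version
 * suffix after '@'.
 */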
static Elf32_Sym * __init find_symbol(struct elfinfo *lib, const char *symname)
{
	unsigned int i;

	for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) {
		char name[MAX_SYMNAME], *c;

		if (lib->dynsym[i].st_name == 0)
			continue;
		strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
			MAX_SYMNAME);
		c = strchr(name, '@');
		if (c)
			*c = 0;
		if (strcmp(symname, name) == 0)
			return &lib->dynsym[i];
	}
	return NULL;
}

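/*
 * Clearing st_name leaves the symbol without a name, so it can no
 * longer be resolved and the C library falls back to the real system
 * call.
 */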
static void __init vdso_nullpatch_one(struct elfinfo *lib, const char *symname)
{
	Elf32_Sym *sym;

	sym = find_symbol(lib, symname);
	if (!sym)
		return;

	sym->st_name = 0;
}

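/*
 * Hide the vdso's timekeeping entry points when they cannot work on
 * this system (see cntvct_ok above).
 */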
static void __init patch_vdso(void *ehdr)
{
	struct elfinfo einfo;

	einfo = (struct elfinfo) {
		.hdr = ehdr,
	};

	einfo.dynsym = find_section(einfo.hdr, ".dynsym", &einfo.dynsymsize);
	einfo.dynstr = find_section(einfo.hdr, ".dynstr", NULL);

	/* If the virtual counter is absent or non-functional we don't
	 * want programs to incur the slight additional overhead of
	 * dispatching through the VDSO only to fall back to syscalls.
	 */
	if (!cntvct_ok) {
		vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
		vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
		vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64");
	}
}

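/*
 * Boot-time setup: sanity-check the embedded vdso image, build the page
 * list for the text pages, probe the virtual counter and patch out the
 * fast-path symbols if it cannot be used.
 */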
static int __init vdso_init(void)
{
	unsigned int text_pages;
	int i;

	if (memcmp(vdso_start, "\177ELF", 4)) {
		pr_err("VDSO is not a valid ELF object!\n");
		return -ENOEXEC;
	}

	text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;

	/* Allocate the VDSO text pagelist */
	vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
				     GFP_KERNEL);
	if (vdso_text_pagelist == NULL)
		return -ENOMEM;

	/* Grab the VDSO data page. */
	vdso_data_page = virt_to_page(vdso_data);

	/* Grab the VDSO text pages. */
	for (i = 0; i < text_pages; i++) {
		struct page *page;

		page = virt_to_page(vdso_start + i * PAGE_SIZE);
		vdso_text_pagelist[i] = page;
	}

	vdso_text_mapping.pages = vdso_text_pagelist;

	vdso_total_pages = 1; /* for the data/vvar page */
	vdso_total_pages += text_pages;

	cntvct_ok = cntvct_functional();

	patch_vdso(vdso_start);

	return 0;
}
arch_initcall(vdso_init);

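/*
 * Map the single vdso data page read-only at addr; arm_install_vdso()
 * places the vdso text immediately above it.
 */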
static int install_vvar(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_MAYREAD,
				       &vdso_data_mapping);

	return PTR_ERR_OR_ZERO(vma);
}

/* assumes mmap_lock is write-locked */
void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long len;

	mm->context.vdso = 0;

	if (vdso_text_pagelist == NULL)
		return;

	if (install_vvar(mm, addr))
		return;

	/* Account for vvar page. */
	addr += PAGE_SIZE;
	len = (vdso_total_pages - 1) << PAGE_SHIFT;

	vma = _install_special_mapping(mm, addr, len,
		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
		&vdso_text_mapping);

	if (!IS_ERR(vma))
		mm->context.vdso = addr;
}