cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

process.c (8832B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/resume_user_mode.h>
#include <asm/current.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <registers.h>
#include <linux/time-internal.h>

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

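/*
 * Map a host pid back to the index of the virtual CPU whose current
 * task is backed by that host process; -1 if no CPU matches.
 */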
int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

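/*
 * Kernel stacks are plain page-allocator blocks of 2^order pages;
 * atomic callers get GFP_ATOMIC since they must not sleep.
 */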
void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

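/*
 * Record the incoming task and its host pid in this CPU's cpu_tasks
 * slot, keeping external_pid()/pid_to_processor_id() coherent across
 * context switches.
 */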
static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

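/*
 * UML context switch.  Publish the next task in cpu_tasks, then jump
 * stacks: switch_threads() (a jmp_buf-based helper elsewhere in
 * arch/um) saves our context in from->thread.switch_buf and resumes
 * wherever "to" last suspended, or in its start handler if it has
 * never run.  prev_sched is passed back so the new context can call
 * schedule_tail().
 */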
void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}

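/*
 * Deferred work on the way back out to userspace: reschedule if
 * needed, deliver pending signals, and run TIF_NOTIFY_RESUME work,
 * much like the generic exit-to-user-mode loop on other arches.
 */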
void interrupt_end(void)
{
	struct pt_regs *regs = &current->thread.regs;

	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING) ||
	    test_thread_flag(TIF_NOTIFY_SIGNAL))
		do_signal(regs);
	if (test_thread_flag(TIF_NOTIFY_RESUME))
		resume_user_mode_work(regs);
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	n = fn(arg);
	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We may want to rely on that
	 * to improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

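/*
 * Set up a new task's thread state.  For a userspace fork the parent's
 * registers are copied and the syscall return value is forced to 0, so
 * the child observes fork() returning 0; a non-NULL sp installs a new
 * user stack pointer.  For a kernel thread (args->fn set), a sanitized
 * register set is used and fn/fn_arg are stashed for
 * new_thread_handler().  new_thread() seeds switch_buf so the first
 * switch to the child longjmps into the chosen handler.
 */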
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long sp = args->stack;
	unsigned long tls = args->tls;
	void (*handler)(void);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (!args->fn) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread.proc = args->fn;
		p->thread.request.u.thread.arg = args->fn_arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!args->fn) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_set_tls(p, tls);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

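/*
 * Idle by either letting the time-travel machinery advance virtual
 * time or putting the host process to sleep until the next event.
 */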
void um_idle_sleep(void)
{
	if (time_travel_mode != TT_MODE_OFF)
		time_travel_sleep();
	else
		os_idle_sleep();
}

void arch_cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	um_idle_sleep();
	raw_local_irq_enable();
}

int __cant_sleep(void)
{
	/* Is in_interrupt() really needed? */
	return in_atomic() || irqs_disabled() || in_interrupt();
}

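/*
 * Decide whether a stack pointer lies outside the current kernel
 * stack.  Masking sp with (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER)
 * rounds it down to the base of a 2^order-page stack; e.g. with
 * 4 KiB pages and order 1 (8 KiB stacks), the mask clears the low
 * 13 bits.  If that base is not current_thread_info(), sp belongs
 * to a user context.
 */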
int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

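/*
 * Invoke every exitcall collected between the two linker-section
 * markers above, in reverse order of registration.
 */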
void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

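/*
 * sysemu controls how the host intercepts guest system calls; the
 * level (0-2) roughly selects plain PTRACE_SYSCALL, PTRACE_SYSEMU or
 * PTRACE_SYSEMU_SINGLESTEP.  The level is exposed via /proc/sysemu,
 * so e.g. "echo 1 > /proc/sysemu" switches modes at runtime, capped
 * at what the host was probed to support.
 */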
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct proc_ops sysemu_proc_ops = {
	.proc_open	= sysemu_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_ops);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

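/*
 * Report single-step state: 0 when TIF_SINGLESTEP is clear, 1 when
 * the pending step is across a syscall, 2 for a plain single step.
 * Note the flag is tested on current even when t names another task.
 */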
int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!test_thread_flag(TIF_SINGLESTEP))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif

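/*
 * Find the "wait channel" of a sleeping task: starting from the stack
 * pointer saved in the task's switch_buf jmp_buf (JB_SP), scan the
 * kernel stack word by word.  Values inside scheduler functions are
 * skipped; the first kernel text address above them is taken to be
 * the function the task is blocked in.  This is a heuristic scan, not
 * an unwound call chain, so stale stack words can yield a wrong
 * answer.
 */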
unsigned long __get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = false;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = true;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

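/*
 * For ELF core dumps: read the FPU state of the host process backing
 * this CPU's userspace (save_i387_registers() is expected to fetch it
 * via ptrace in the os layer).
 */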
int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
}