cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

process.c (10938B)


/*
 * arch/xtensa/kernel/process.c
 *
 * Xtensa Processor version.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Kevin Chea
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/hw_breakpoint.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/mqueue.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/platform.h>
#include <asm/mmu.h>
#include <asm/irq.h>
#include <linux/atomic.h>
#include <asm/asm-offsets.h>
#include <asm/regs.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>

extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void);

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);


#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

#if XTENSA_HAVE_COPROCESSORS

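/*
 * Xtensa coprocessor contexts are switched lazily: the per-CPU exc_table
 * keeps one coprocessor_owner[] slot per coprocessor, naming the thread
 * whose state currently lives in that coprocessor's registers, and the
 * CPENABLE special register gates access so that a thread touching a
 * coprocessor it does not own traps first. The helpers below write that
 * live state back to the owner's thread_info and/or drop ownership.
 */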
void local_coprocessors_flush_release_all(void)
{
	struct thread_info **coprocessor_owner;
	struct thread_info *unique_owner[XCHAL_CP_MAX];
	int n = 0;
	int i, j;

	coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
	xtensa_set_sr(XCHAL_CP_MASK, cpenable);

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		struct thread_info *ti = coprocessor_owner[i];

		if (ti) {
			coprocessor_flush(ti, i);

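			/*
			 * Remember each distinct owner only once so that
			 * cpenable is cleared exactly once per thread in
			 * the loop further down.
			 */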
			for (j = 0; j < n; j++)
				if (unique_owner[j] == ti)
					break;
			if (j == n)
				unique_owner[n++] = ti;

			coprocessor_owner[i] = NULL;
		}
	}
	for (i = 0; i < n; i++) {
		/* pairs with memw (1) in fast_coprocessor and memw in switch_to */
		smp_wmb();
		unique_owner[i]->cpenable = 0;
	}
	xtensa_set_sr(0, cpenable);
}

static void local_coprocessor_release_all(void *info)
{
	struct thread_info *ti = info;
	struct thread_info **coprocessor_owner;
	int i;

	coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;

	/* Walk through all cp owner slots and release those owned by the
	 * requested thread. */

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if (coprocessor_owner[i] == ti)
			coprocessor_owner[i] = NULL;
	}
	/* pairs with memw (1) in fast_coprocessor and memw in switch_to */
	smp_wmb();
	ti->cpenable = 0;
	if (ti == current_thread_info())
		xtensa_set_sr(0, cpenable);
}

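/*
 * The live register contents for ti's coprocessors reside on the CPU that
 * owns them, so the release (and the flush variants below) must run there:
 * smp_call_function_single() fires the local_* helper on ti->cp_owner_cpu
 * and waits for it to finish.
 */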
void coprocessor_release_all(struct thread_info *ti)
{
	if (ti->cpenable) {
		/* pairs with memw (2) in fast_coprocessor */
		smp_rmb();
		smp_call_function_single(ti->cp_owner_cpu,
					 local_coprocessor_release_all,
					 ti, true);
	}
}

static void local_coprocessor_flush_all(void *info)
{
	struct thread_info *ti = info;
	struct thread_info **coprocessor_owner;
	unsigned long old_cpenable;
	int i;

	coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
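	/* XSR exchanges CPENABLE: enable all of ti's coprocessors so that
	 * coprocessor_flush() may access them, keeping the old mask to
	 * restore afterwards. */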
	old_cpenable = xtensa_xsr(ti->cpenable, cpenable);

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if (coprocessor_owner[i] == ti)
			coprocessor_flush(ti, i);
	}
	xtensa_set_sr(old_cpenable, cpenable);
}

void coprocessor_flush_all(struct thread_info *ti)
{
	if (ti->cpenable) {
		/* pairs with memw (2) in fast_coprocessor */
		smp_rmb();
		smp_call_function_single(ti->cp_owner_cpu,
					 local_coprocessor_flush_all,
					 ti, true);
	}
}

static void local_coprocessor_flush_release_all(void *info)
{
	local_coprocessor_flush_all(info);
	local_coprocessor_release_all(info);
}

void coprocessor_flush_release_all(struct thread_info *ti)
{
	if (ti->cpenable) {
		/* pairs with memw (2) in fast_coprocessor */
		smp_rmb();
		smp_call_function_single(ti->cp_owner_cpu,
					 local_coprocessor_flush_release_all,
					 ti, true);
	}
}

#endif


/*
 * Power management idle function, if any is provided by the platform.
 */
void arch_cpu_idle(void)
{
	platform_idle();
}

/*
 * This is called when the thread calls exit().
 */
void exit_thread(struct task_struct *tsk)
{
#if XTENSA_HAVE_COPROCESSORS
	coprocessor_release_all(task_thread_info(tsk));
#endif
}

/*
 * Flush thread state. This is called when a thread does an execve().
 * Note that we flush (rather than just release) the coprocessor registers
 * so that their state is preserved in case execve() fails.
 */
void flush_thread(void)
{
#if XTENSA_HAVE_COPROCESSORS
	struct thread_info *ti = current_thread_info();
	coprocessor_flush_release_all(ti);
#endif
	flush_ptrace_hw_breakpoint(current);
}

/*
 * This gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#if XTENSA_HAVE_COPROCESSORS
	coprocessor_flush_all(task_thread_info(src));
#endif
	*dst = *src;
	return 0;
}

/*
 * Copy thread.
 *
 * There are two modes in which this function is called:
 * 1) Userspace thread creation,
 *    regs != NULL, usp_thread_fn is the userspace stack pointer.
 *    It is expected to copy parent regs (in case CLONE_VM is not set
 *    in the clone_flags) and set up the passed usp in the childregs.
 * 2) Kernel thread creation,
 *    regs == NULL, usp_thread_fn is the function to run in the new thread
 *    and thread_fn_arg is its parameter.
 *    childregs are not used for kernel threads.
 *
 * The stack layout for the new thread looks like this:
 *
 *	+------------------------+
 *	|       childregs        |
 *	+------------------------+ <- thread.sp = sp in dummy-frame
 *	|      dummy-frame       |    (saved in dummy-frame spill-area)
 *	+------------------------+
 *
 * We create a dummy frame to return to either ret_from_fork or
 *   ret_from_kernel_thread:
 *   a0 points to ret_from_fork/ret_from_kernel_thread (simulating a call4)
 *   sp points to itself (thread.sp)
 *   a2, a3 are unused for userspace threads,
 *   a2 points to thread_fn, a3 holds thread_fn arg for kernel threads.
 *
 * Note: This is a pristine frame, so we don't need any spill region on top of
 *       childregs.
 *
 * The fun part: if we're keeping the same VM (i.e. cloning a thread,
 * not an entire process), we're normally given a new usp, and we CANNOT share
 * any live address register windows.  If we just copy those live frames over,
 * the two threads (parent and child) will overflow the same frames onto the
 * parent stack at different times, likely corrupting the parent stack
 * (especially if the parent returns from functions that called clone() and
 * calls new ones, before the child overflows its now old copies of its parent
 * windows).  One solution is to spill windows to the parent stack, but that's
 * fairly involved.  Much simpler to just not copy those live frames across.
 */
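/*
 * For orientation: on the windowed ABI the top two bits of a return address
 * encode the caller's window increment (1 for call4), and the full PC is
 * reconstructed by combining the low 30 address bits with the high bits of
 * the stack pointer; MAKE_RA_FOR_CALL() and MAKE_PC_FROM_RA() encapsulate
 * this encoding.
 */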

int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp_thread_fn = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *childregs = task_pt_regs(p);

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	struct thread_info *ti;
#endif

#if defined(__XTENSA_WINDOWED_ABI__)
	/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
	SPILL_SLOT(childregs, 1) = (unsigned long)childregs;
	SPILL_SLOT(childregs, 0) = 0;

	p->thread.sp = (unsigned long)childregs;
#elif defined(__XTENSA_CALL0_ABI__)
	/* Reserve 16 bytes for the _switch_to stack frame. */
	p->thread.sp = (unsigned long)childregs - 16;
#else
#error Unsupported Xtensa ABI
#endif

	if (!args->fn) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long usp = usp_thread_fn ?
			usp_thread_fn : regs->areg[1];

		p->thread.ra = MAKE_RA_FOR_CALL(
				(unsigned long)ret_from_fork, 0x1);

		*childregs = *regs;
		childregs->areg[1] = usp;
		childregs->areg[2] = 0;

		/* When sharing memory with the parent thread, the child
		   usually starts on a pristine stack, so we have to reset
		   windowbase, windowstart and wmask.
		   (Note that such a new thread is required to always create
		   an initial call4 frame.)
		   The exception is vfork, where the new thread continues to
		   run on the parent's stack until it calls execve. This could
		   be a call8 or call12, which requires a legal stack frame
		   of the previous caller for the overflow handlers to work.
		   (Note that it's always legal to overflow live registers.)
		   In this case, ensure to spill at least the stack pointer
		   of that frame. */

		if (clone_flags & CLONE_VM) {
			/* check that the caller window is live and on the same stack */
			int len = childregs->wmask & ~0xf;
			if (regs->areg[1] == usp && len != 0) {
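				/*
				 * The top two bits of the caller's a0 encode
				 * its window increment (1/2/3 for
				 * call4/call8/call12), so the caller's
				 * registers sit callinc * 4 slots before ours
				 * in the rotated register file. Its a1 (the
				 * stack pointer) is spilled to usp - 12, the
				 * slot where the window overflow handlers
				 * keep a1 of the frame above (a0..a3 occupy
				 * usp-16..usp-4).
				 */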
				int callinc = (regs->areg[0] >> 30) & 3;
				int caller_ars = XCHAL_NUM_AREGS - callinc * 4;
				put_user(regs->areg[caller_ars + 1],
					 (unsigned __user *)(usp - 12));
			}
			childregs->wmask = 1;
			childregs->windowstart = 1;
			childregs->windowbase = 0;
		}

		if (clone_flags & CLONE_SETTLS)
			childregs->threadptr = tls;
	} else {
		p->thread.ra = MAKE_RA_FOR_CALL(
				(unsigned long)ret_from_kernel_thread, 1);

		/* pass parameters to ret_from_kernel_thread: */
#if defined(__XTENSA_WINDOWED_ABI__)
		/*
		 * a2 = thread_fn, a3 = thread_fn arg.
		 * Window underflow will load registers from the
		 * spill slots on the stack on return from _switch_to.
		 */
		SPILL_SLOT(childregs, 2) = (unsigned long)args->fn;
		SPILL_SLOT(childregs, 3) = (unsigned long)args->fn_arg;
#elif defined(__XTENSA_CALL0_ABI__)
		/*
		 * a12 = thread_fn, a13 = thread_fn arg.
		 * _switch_to epilogue will load registers from the stack.
		 */
		((unsigned long *)p->thread.sp)[0] = (unsigned long)args->fn;
		((unsigned long *)p->thread.sp)[1] = (unsigned long)args->fn_arg;
#else
#error Unsupported Xtensa ABI
#endif

		/* Childregs are only used when we're going to userspace,
		 * in which case start_thread will set them up.
		 */
	}

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	ti = task_thread_info(p);
	ti->cpenable = 0;
#endif

	clear_ptrace_hw_breakpoint(p);

	return 0;
}


/*
 * These bracket the sleeping functions.
 */

unsigned long __get_wchan(struct task_struct *p)
{
	unsigned long sp, pc;
	unsigned long stack_page = (unsigned long) task_stack_page(p);
	int count = 0;

	sp = p->thread.sp;
	pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);

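	/*
	 * Walk saved (ra, sp) pairs back up the stack: each frame's spill
	 * area keeps the return address and the previous stack pointer in
	 * its first two slots. Report the first PC outside the scheduler;
	 * give up after 16 frames or once sp leaves the task's stack page.
	 */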
	do {
		if (sp < stack_page + sizeof(struct task_struct) ||
		    sp >= (stack_page + THREAD_SIZE) ||
		    pc == 0)
			return 0;
		if (!in_sched_functions(pc))
			return pc;

		/* Frame layout in words below sp: sp[-4]: ra, sp[-3]: sp' */

		pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
		sp = SPILL_SLOT(sp, 1);
	} while (count++ < 16);
	return 0;
}