cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

process.c (7063B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013  Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>
#include <linux/nmi.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/fpu.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/loongarch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/vdso.h>

/*
 * Idle related variables and functions
 */

unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

#ifdef CONFIG_HOTPLUG_CPU
/* Called from the idle loop once this CPU has been marked offline. */
void arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

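/*
 * Prepare the register state for a newly exec'ed user program; called by
 * the binfmt loader (e.g. load_elf_binary()) with the entry point and the
 * initial user stack pointer.
 */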
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	unsigned long crmd;
	unsigned long prmd;
	unsigned long euen;

	/* New thread loses kernel privileges. */
	crmd = regs->csr_crmd & ~(PLV_MASK);
	crmd |= PLV_USER;
	regs->csr_crmd = crmd;

	prmd = regs->csr_prmd & ~(PLV_MASK);
	prmd |= PLV_USER;
	regs->csr_prmd = prmd;

	/* The FPU starts disabled and is enabled lazily on first use. */
	euen = regs->csr_euen & ~(CSR_EUEN_FPEN);
	regs->csr_euen = euen;
	lose_fpu(0);

	clear_thread_flag(TIF_LSX_CTX_LIVE);
	clear_thread_flag(TIF_LASX_CTX_LIVE);
	clear_used_math();
	regs->csr_era = pc;	/* exception return lands at the entry point */
	regs->regs[3] = sp;	/* r3 is $sp */
}

void exit_thread(struct task_struct *tsk)
{
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_fpu_owner())
		save_fp(current);

	preempt_enable();

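	/*
	 * If this task has used the FPU, the child needs the whole
	 * task_struct, FP registers included; otherwise stop the copy just
	 * short of thread.fpu.fpr so stale FP state is never inherited.
	 */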
	if (used_math())
		memcpy(dst, src, sizeof(struct task_struct));
	else
		memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));

	return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long childksp;
	unsigned long tls = args->tls;
	unsigned long usp = args->stack;
	unsigned long clone_flags = args->flags;
	struct pt_regs *childregs, *regs = current_pt_regs();

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* The child's pt_regs sit at the top of its kernel stack. */
	childregs = (struct pt_regs *) childksp - 1;
	/* The child's kernel stack pointer starts just below its pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.csr_euen = 0;
	p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
	p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
	p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
	if (unlikely(args->fn)) {
		/*
		 * Kernel thread: stash fn/fn_arg in the callee-saved
		 * registers $s0/$s1 (r23/r24) and point the saved $ra (r1)
		 * at ret_from_kernel_thread, which calls fn(fn_arg) after
		 * the first switch_to() to this task.
		 */
		p->thread.reg03 = childksp;
		p->thread.reg23 = (unsigned long)args->fn;
		p->thread.reg24 = (unsigned long)args->fn_arg;
		p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->csr_euen = p->thread.csr_euen;
		childregs->csr_crmd = p->thread.csr_crmd;
		childregs->csr_prmd = p->thread.csr_prmd;
		childregs->csr_ecfg = p->thread.csr_ecfg;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[4] = 0;	/* $a0: child gets zero as return value */
	if (usp)
		childregs->regs[3] = usp;	/* $sp: caller-supplied user stack */

	p->thread.reg03 = (unsigned long) childregs;
	p->thread.reg01 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->csr_euen = 0;

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDSIMD);
	clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
	clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);

	if (clone_flags & CLONE_SETTLS)
		childregs->regs[2] = tls;	/* $tp: thread-local storage pointer */

	return 0;
}

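/*
 * Not implemented: without a stack unwinder there is no reliable way to
 * tell where a blocked task is sleeping, so report nothing for wchan.
 */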
unsigned long __get_wchan(struct task_struct *task)
{
	return 0;
}

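/*
 * Highest address usable as the top of a new process's user stack,
 * leaving room above it for the vDSO and its data page.
 */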
unsigned long stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	/* Space for the VDSO & data page */
	top -= PAGE_ALIGN(current->thread.vdso->size);
	top -= PAGE_SIZE;

	/* Space to randomize the VDSO base */
	if (current->flags & PF_RANDOMIZE)
		top -= VDSO_RANDOMIZE_SIZE;

	return top;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABI and on a 16-byte boundary for the 64-bit ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;	/* random sub-page offset */

	return sp & STACK_ALIGN;
}

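/*
 * Remote backtrace support: each targeted CPU gets an asynchronous
 * smp_call IPI whose handler dumps that CPU's backtrace (used e.g. by
 * sysrq-l and the lockup detectors).
 */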
static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
	nmi_cpu_backtrace(get_irq_regs());
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		csd = &per_cpu(backtrace_csd, cpu);
		csd->func = handle_backtrace;
		smp_call_function_single_async(cpu, csd);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}

#ifdef CONFIG_64BIT
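/*
 * Flatten a struct pt_regs into the flat LOONGARCH_EF_* array layout used
 * by ptrace regsets and ELF core dumps.
 */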
void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = LOONGARCH_EF_R1; i <= LOONGARCH_EF_R31; i++) {
		uregs[i] = regs->regs[i - LOONGARCH_EF_R0];
	}

	uregs[LOONGARCH_EF_ORIG_A0] = regs->orig_a0;
	uregs[LOONGARCH_EF_CSR_ERA] = regs->csr_era;
	uregs[LOONGARCH_EF_CSR_BADV] = regs->csr_badvaddr;
	uregs[LOONGARCH_EF_CSR_CRMD] = regs->csr_crmd;
	uregs[LOONGARCH_EF_CSR_PRMD] = regs->csr_prmd;
	uregs[LOONGARCH_EF_CSR_EUEN] = regs->csr_euen;
	uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg;
	uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat;
}
#endif /* CONFIG_64BIT */