cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

fault.c (6142B)


// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/io.h>

#define __EXTERN_INLINE inline
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#undef  __EXTERN_INLINE

#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

extern void die_if_kernel(char *, struct pt_regs *, long, unsigned long *);


/*
 * Force a new ASN for a task.
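 *
 * ASNs (address-space numbers) tag TLB entries so the whole TLB need
 * not be flushed on a context switch; handing the task a fresh ASN
 * makes any stale translations unreachable.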
 */

#ifndef CONFIG_SMP
unsigned long last_asn = ASN_FIRST_VERSION;
#endif

void
__load_new_mm_context(struct mm_struct *next_mm)
{
	unsigned long mmc;
	struct pcb_struct *pcb;

	mmc = __get_new_mm_context(next_mm, smp_processor_id());
	next_mm->context[smp_processor_id()] = mmc;

	pcb = &current_thread_info()->pcb;
	pcb->asn = mmc & HARDWARE_ASN_MASK;
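	/* PTBR holds the page frame number of the top-level page table.
	   The pgd lives in the kernel's identity-mapped region, so
	   subtracting IDENT_ADDR yields its physical address, and
	   PAGE_SHIFT converts that to a PFN.  */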
	pcb->ptbr = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;

	__reload_thread(pcb);
}


/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to handle_mm_fault().
 *
 * mmcsr:
 *	0 = translation not valid
 *	1 = access violation
 *	2 = fault-on-read
 *	3 = fault-on-execute
 *	4 = fault-on-write
 *
 * cause:
 *	-1 = instruction fetch
 *	0 = load
 *	1 = store
 *
 * Registers $9 through $15 are saved in a block just prior to `regs' and
 * are saved and restored around the call to allow exception code to
 * modify them.
 */

/* Macro for exception fixup code to access integer registers.  */
#define dpf_reg(r)							\
	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 :	\
				 (r) <= 18 ? (r)+10 : (r)-10])
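/* The index arithmetic follows struct pt_regs plus the $9-$15 block
   saved just below it: $0-$8 map to slots 0-8, $9-$15 to the negative
   slots -7..-1 before `regs', $19-$28 to slots 9-18, and $16-$18 to
   slots 26-28.  E.g. dpf_reg(9) is ((unsigned long *)regs)[-7]. */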

asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
	      long cause, struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	const struct exception_table_entry *fixup;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
	   (or is suppressed by the PALcode).  Support that for older CPUs
	   by ignoring such an instruction.  */
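	/* Bits 21-25 of the instruction are the destination register
	   (0x1f = $31/$f31); bits 26-31 are the opcode, and the mask
	   0x30f00001400 has one bit set for each load opcode listed
	   below.  */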
	if (cause == 0) {
		unsigned int insn;
		__get_user(insn, (unsigned int __user *)regs->pc);
		if ((insn >> 21 & 0x1f) == 0x1f &&
		    /* ldq ldl ldt lds ldg ldf ldwu ldbu */
		    (1ul << (insn >> 26) & 0x30f00001400ul)) {
			regs->pc += 4;
			return;
		}
	}

	/* If we're in an interrupt context, or have no user context,
	   we must not take the fault.  */
	if (!mm || faulthandler_disabled())
		goto no_context;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
	if (address >= TASK_SIZE)
		goto vmalloc_fault;
#endif
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	   we can handle it.  */
 good_area:
	si_code = SEGV_ACCERR;
	if (cause < 0) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (!cause) {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	}

	/* If for any reason at all we couldn't handle the fault,
	   make sure we exit gracefully rather than endlessly redo
	   the fault.  */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);

	return;

	/* Something tried to access memory that isn't in our memory map.
	   Fix it, but check if it's kernel or user first.  */
 bad_area:
	mmap_read_unlock(mm);

	if (user_mode(regs))
		goto do_sigsegv;

 no_context:
	/* Are we prepared to handle this fault as an exception?  */
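	/* Each exception table entry pairs a faulting PC with fixup
	   code; fixup_exception() patches the saved registers through
	   dpf_reg() and yields the PC at which to resume.  */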
	if ((fixup = search_exception_tables(regs->pc)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(dpf_reg, fixup, regs->pc);
		regs->pc = newpc;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	   terminate things with extreme prejudice.  */
	printk(KERN_ALERT "Unable to handle kernel paging request at "
	       "virtual address %016lx\n", address);
	die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
	make_task_dead(SIGKILL);

	/* We ran out of memory, or some other thing happened to us that
	   made us unable to handle the page fault gracefully.  */
 out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

 do_sigbus:
	mmap_read_unlock(mm);
	/* Send a sigbus, regardless of whether we were in kernel
	   or user mode.  */
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address);
	if (!user_mode(regs))
		goto no_context;
	return;

 do_sigsegv:
	force_sig_fault(SIGSEGV, si_code, (void __user *) address);
	return;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
 vmalloc_fault:
	if (user_mode(regs))
		goto do_sigsegv;
	else {
		/* Synchronize this task's top level page-table
		   with the "reference" page table from init.  */
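		/* A kernel-mode fault above TASK_SIZE lands here when
		   this mm's pgd lacks an entry that swapper_pg_dir
		   already has, presumably populated by a large vmalloc
		   mapping; copying the entry resolves the fault
		   without a signal.  */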
		long index = pgd_index(address);
		pgd_t *pgd, *pgd_k;

		pgd = current->active_mm->pgd + index;
		pgd_k = swapper_pg_dir + index;
		if (!pgd_present(*pgd) && pgd_present(*pgd_k)) {
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}
		goto no_context;
	}
#endif
}