cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

fault.c (6972B)


// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

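	/*
	 * Walk the kernel page table top-down; a missing or bad entry at
	 * any level means the address has no present translation.
	 */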
	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

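/*
 * Bit positions of VM_READ, VM_WRITE and VM_EXEC within vm_flags; the
 * compile-time check in ia64_do_page_fault() below verifies that they
 * stay in sync with <linux/mm.h>.
 */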
#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long mask;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

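	/*
	 * Translate the fault type from the ISR into vm_flags form: the
	 * execute (X) and write (W) bits of the ISR map onto VM_EXEC and
	 * VM_WRITE respectively.
	 */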
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_lock is performance critical.... */
	prefetchw(&mm->mmap_lock);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	/*
	 * This is to handle kprobes on user space access instructions.
	 */
	if (kprobe_page_fault(regs, TRAP_BRKPT))
		return;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns a vma such that address < vma->vm_end,
	 * or NULL.
	 *
	 * We may find no vma even though the address is valid: the last
	 * vm area may be the register backing store, which needs to
	 * expand upwards; in that case vma will be NULL but prev_vma will
	 * be non-NULL.
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

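	/*
	 * A read fault is acceptable if the vma is readable or writable:
	 * ia64 has no write-only page protection, so a writable mapping
	 * is effectively readable as well.
	 */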
	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

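	/* Every access type that triggered the fault must be permitted. */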
	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGSEGV) {
			goto bad_area;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
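		/* Any other VM_FAULT_ERROR bit is unexpected here. */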
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;

  check_expansion:
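	/*
	 * Two expansion cases: a grows-down stack vma just above the
	 * faulting address, or the grows-up register backing store
	 * (prev_vma) ending exactly at the address.
	 */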
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	mmap_read_unlock(mm);
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		force_sig_fault(signal, code, (void __user *) address,
				0, __ISR_VALID, isr);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non-present translation that becomes
	 * stale. If that happens, the non-present fault handler already purged the stale
	 * translation, which fixed the problem. So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		make_task_dead(SIGKILL);
	return;

  out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}