cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

fault.c (6488B)


// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>

void bad_page_fault(struct pt_regs*, unsigned long, int);

static void vmalloc_fault(struct pt_regs *regs, unsigned int address)
{
#ifdef CONFIG_MMU
	/* Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	struct mm_struct *act_mm = current->active_mm;
	int index = pgd_index(address);
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;

	if (act_mm == NULL)
		goto bad_page_fault;

	pgd = act_mm->pgd + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		goto bad_page_fault;

	pgd_val(*pgd) = pgd_val(*pgd_k);

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
		goto bad_page_fault;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud) || !pud_present(*pud_k))
		goto bad_page_fault;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
		goto bad_page_fault;

	pmd_val(*pmd) = pmd_val(*pmd_k);
	pte_k = pte_offset_kernel(pmd_k, address);

	if (!pte_present(*pte_k))
		goto bad_page_fault;
	return;

bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
#else
	WARN_ONCE(1, "%s in noMMU configuration\n", __func__);
#endif
}
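
/*
 * A vmalloc fault is taken when the kernel touches an address that is
 * mapped in the reference page table (init_mm.pgd) but not yet in the
 * faulting task's page table; the walk above copies the pgd and pmd
 * entries over and resumes. A minimal sketch of the triggering pattern
 * (hypothetical caller, for illustration only):
 *
 *	void *p = vmalloc(PAGE_SIZE);	// entry created in init_mm only
 *	memset(p, 0, PAGE_SIZE);	// first touch under a different mm
 *					// faults here and gets synced
 */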
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	int code;

	int is_write, is_exec;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs)) {
		vmalloc_fault(regs, address);
		return;
	}

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

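	/* Classify the access. On xtensa the page protection bits live in
	 * the TLB's cache attribute field, so a prohibited store is
	 * reported as a store cache-attribute exception, while instruction
	 * fetches fault through the ITLB or the fetch cache-attribute
	 * check and are treated as exec accesses.
	 */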
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec =  (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		    exccause == EXCCAUSE_ITLB_MISS ||
		    exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
		 current->comm, current->pid,
		 address, exccause, regs->pc,
		 is_write ? "w" : "", is_exec ? "x" : "");

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
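
	/* At this point the access is compatible with the vma's
	 * permissions; e.g. a write to a PROT_READ mapping would have
	 * taken the bad_area path above with code == SEGV_ACCERR, while
	 * an address outside any vma keeps code == SEGV_MAPERR.
	 */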

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			bad_page_fault(regs, address, SIGKILL);
		return;
	}

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;

	/* Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		force_sig_fault(SIGSEGV, code, (void *) address);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;

	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;
}
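
/*
 * User-visible behavior, sketched with a hypothetical test program (built
 * separately, not part of this file): an access to an unmapped address
 * reaches the bad_area path above and is delivered by force_sig_fault()
 * as SIGSEGV with si_code == SEGV_MAPERR and si_addr set to the faulting
 * address.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	static void handler(int sig, siginfo_t *info, void *ctx)
 *	{
 *		printf("SIGSEGV at %p (si_code=%d)\n",
 *		       info->si_addr, info->si_code);
 *		exit(0);
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigaction sa = {
 *			.sa_sigaction = handler,
 *			.sa_flags = SA_SIGINFO,
 *		};
 *		sigaction(SIGSEGV, &sa, NULL);
 *		*(volatile int *)0 = 1;	// faults; si_code == SEGV_MAPERR
 *		return 1;
 *	}
 */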

void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void __noreturn die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault?  */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
			 current->comm, regs->pc, entry->fixup);
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}
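
	/* The fixup entries searched above are recorded in the __ex_table
	 * section, typically by the user-access helpers (get_user(),
	 * put_user(), copy_{to,from}_user()); redirecting regs->pc to
	 * entry->fixup lets the faulting operation fail gracefully
	 * (e.g. by returning -EFAULT) instead of taking down the kernel.
	 */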

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address %08lx\n pc = %08lx, ra = %08lx\n",
		 address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
}