cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

fault.c (4846B)


// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/fault.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/setup.h>
#include <asm/traps.h>

extern void die_if_kernel(char *, struct pt_regs *, long);

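/*
 * Deliver the fault recorded in current->thread (signo/code/faddr) to the
 * faulting context.  For a user-mode fault this forces the signal on the
 * current task and returns 1.  For a kernel-mode fault it first consults the
 * exception fixup table and returns -1 if a fixup handled it; otherwise it
 * oopses and kills the task via make_task_dead(), so it does not return.
 */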
int send_fault_sig(struct pt_regs *regs)
{
	int signo, si_code;
	void __user *addr;

	signo = current->thread.signo;
	si_code = current->thread.code;
	addr = (void __user *)current->thread.faddr;
	pr_debug("send_fault_sig: %p,%d,%d\n", addr, signo, si_code);

	if (user_mode(regs)) {
		force_sig_fault(signo, si_code, addr);
	} else {
		if (fixup_exception(regs))
			return -1;

		//if (signo == SIGBUS)
		//	force_sig_fault(si_signo, si_code, addr);

		/*
		 * Oops. The kernel tried to access some bad page. We'll have to
		 * terminate things with extreme prejudice.
		 */
		if ((unsigned long)addr < PAGE_SIZE)
			pr_alert("Unable to handle kernel NULL pointer dereference");
		else
			pr_alert("Unable to handle kernel access");
		pr_cont(" at virtual address %p\n", addr);
		die_if_kernel("Oops", regs, 0 /*error_code*/);
		make_task_dead(SIGKILL);
	}

	return 1;
}

/*
 * This routine handles page faults.  It determines the problem, and
 * then passes it off to one of the appropriate routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *
 * If this routine detects a bad access, it returns 1, otherwise it
 * returns 0.
 */
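/*
 * For reference, the (error_code & 3) combinations handled by the switch
 * below therefore decode as:
 *	0 = read,  page not present
 *	1 = read,  protection fault
 *	2 = write, page not present
 *	3 = write, protection fault
 */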
int do_page_fault(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
		regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL);

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

	vma = find_vma(mm, address);
	if (!vma)
		goto map_err;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto map_err;
	if (user_mode(regs)) {
		/* Accessing the stack below usp is always a bug.  The
		   "+ 256" is there due to some instructions doing
		   pre-decrement on the stack and that doesn't show up
		   until later.  */
		if (address + 256 < rdusp())
			goto map_err;
	}
	if (expand_stack(vma, address))
		goto map_err;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	pr_debug("do_page_fault: good_area\n");
	switch (error_code & 3) {
		default:	/* 3: write, present */
			fallthrough;
		case 2:		/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto acc_err;
			flags |= FAULT_FLAG_WRITE;
			break;
		case 1:		/* read, present */
			goto acc_err;
		case 0:		/* read, not present */
			if (unlikely(!vma_is_accessible(vma)))
				goto acc_err;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

	fault = handle_mm_fault(vma, address, flags, regs);
	pr_debug("handle_mm_fault returns %x\n", fault);

	if (fault_signal_pending(fault, regs))
		return 0;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto map_err;
		else if (fault & VM_FAULT_SIGBUS)
			goto bus_err;
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return 0;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return 0;

no_context:
	current->thread.signo = SIGBUS;
	current->thread.faddr = address;
	return send_fault_sig(regs);

bus_err:
	current->thread.signo = SIGBUS;
	current->thread.code = BUS_ADRERR;
	current->thread.faddr = address;
	goto send_sig;

map_err:
	current->thread.signo = SIGSEGV;
	current->thread.code = SEGV_MAPERR;
	current->thread.faddr = address;
	goto send_sig;

acc_err:
	current->thread.signo = SIGSEGV;
	current->thread.code = SEGV_ACCERR;
	current->thread.faddr = address;

send_sig:
	mmap_read_unlock(mm);
	return send_fault_sig(regs);
}
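
For illustration (not part of fault.c): the acc_err path above can be exercised from user space by writing to a read-only mapping. The VM_WRITE check fails, the handler records SEGV_ACCERR, and send_fault_sig() delivers SIGSEGV with si_addr set to the faulting address. A minimal, self-contained sketch in ordinary user-space C follows; nothing in it is specific to this fork, and the program names are chosen for the example.

#define _GNU_SOURCE
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static sigjmp_buf env;
static volatile sig_atomic_t seen_code;
static void *volatile seen_addr;

static void on_segv(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)ctx;
	seen_code = info->si_code;	/* expected: SEGV_ACCERR (acc_err path) */
	seen_addr = info->si_addr;	/* expected: the faulting address (thread.faddr) */
	siglongjmp(env, 1);
}

int main(void)
{
	struct sigaction sa;
	long psz = sysconf(_SC_PAGESIZE);
	char *p;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_segv;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	/* Read-only anonymous mapping: a write fails the VM_WRITE check. */
	p = mmap(NULL, psz, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	if (sigsetjmp(env, 1) == 0)
		p[0] = 1;	/* faults; the handler jumps back here */

	printf("si_code=%d (SEGV_ACCERR=%d), si_addr=%p, mapping=%p\n",
	       (int)seen_code, SEGV_ACCERR, seen_addr, (void *)p);
	return 0;
}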