cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

irq_32.c (4164B)


// SPDX-License-Identifier: GPL-2.0
/*
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>
#include <asm/nospec-branch.h>
#include <asm/softirq_stack.h>

#ifdef CONFIG_DEBUG_STACKOVERFLOW

int sysctl_panic_on_stackoverflow __read_mostly;

/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
DEFINE_PER_CPU(struct irq_stack *, softirq_stack_ptr);

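/*
 * Switch %esp to the given stack, make a retpoline-safe indirect call
 * to func there, then restore the original stack pointer.
 */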
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     CALL_NOSPEC
		     "movl	%%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
		       [thunk_target] "D"(func)
		     : "memory", "cc", "edx", "ecx", "eax");
}

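/* Return the lowest address of the stack we are currently running on. */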
static inline void *current_stack(void)
{
	return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
}

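/*
 * Switch to this CPU's hardirq stack and invoke desc->handle_irq there.
 * Returns 0 if we are already on the hardirq stack, in which case the
 * caller must handle the interrupt on the current stack.
 */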
static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
{
	struct irq_stack *curstk, *irqstk;
	u32 *isp, *prev_esp, arg1;

	curstk = (struct irq_stack *) current_stack();
	irqstk = __this_cpu_read(hardirq_stack_ptr);

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (unlikely(curstk == irqstk))
		return 0;

	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

	/* Save the next esp at the bottom of the stack */
	prev_esp = (u32 *)irqstk;
	*prev_esp = current_stack_pointer;

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);

	asm volatile("xchgl	%%ebx,%%esp	\n"
		     CALL_NOSPEC
		     "movl	%%ebx,%%esp	\n"
		     : "=a" (arg1), "=b" (isp)
		     :  "0" (desc),   "1" (isp),
			[thunk_target] "D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}

/*
 * Allocate per-cpu stacks for hardirq and softirq processing
 */
int irq_init_percpu_irqstack(unsigned int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *ph, *ps;

	if (per_cpu(hardirq_stack_ptr, cpu))
		return 0;

	ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
	if (!ph)
		return -ENOMEM;
	ps = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
	if (!ps) {
		__free_pages(ph, THREAD_SIZE_ORDER);
		return -ENOMEM;
	}

	per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
	per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
	return 0;
}

#ifndef CONFIG_PREEMPT_RT
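/*
 * Run pending softirqs on the dedicated per-CPU softirq stack rather
 * than on whatever stack happens to be in use.
 */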
void do_softirq_own_stack(void)
{
	struct irq_stack *irqstk;
	u32 *isp, *prev_esp;

	irqstk = __this_cpu_read(softirq_stack_ptr);

	/* build the stack frame on the softirq stack */
	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

	/* Push the previous esp onto the stack */
	prev_esp = (u32 *)irqstk;
	*prev_esp = current_stack_pointer;

	call_on_stack(__do_softirq, isp);
}
#endif

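/*
 * Dispatch an interrupt: run the handler on the hardirq stack unless
 * the interrupt arrived from user mode or the stack switch is not
 * possible, in which case it is handled on the current stack.
 */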
void __handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
	int overflow = check_stack_overflow();

	if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
		if (unlikely(overflow))
			print_stack_overflow();
		generic_handle_irq_desc(desc);
	}
}