cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

irq.c (2661B)


/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>

/* Per-CPU IRQ stacks, allocated in init_IRQ() below. */
void *irq_stack[NR_CPUS];

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	printk("unexpected IRQ # %d\n", irq);
}

/* Spurious-interrupt count, reported as the "ERR" line in /proc/interrupts. */
atomic_t irq_err_count;

int arch_show_interrupts(struct seq_file *p, int prec)
{
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
	return 0;
}

asmlinkage void spurious_interrupt(void)
{
	atomic_inc(&irq_err_count);
}

void __init init_IRQ(void)
{
	int i;
	unsigned int order = get_order(IRQ_STACK_SIZE);

	/* Mark every IRQ as not valid for autoprobing. */
	for (i = 0; i < NR_IRQS; i++)
		irq_set_noprobe(i);

	/* A vectored external interrupt controller does its own masking,
	 * so clear the CP0 Status interrupt-mask bits. */
	if (cpu_has_veic)
		clear_c0_status(ST0_IM);

	arch_init_irq();

	/* Allocate a dedicated IRQ stack for each possible CPU. */
	for_each_possible_cpu(i) {
		void *s = (void *)__get_free_pages(GFP_KERNEL, order);

		irq_stack[i] = s;
		pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
			irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
	}
}

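/*
 * For illustration only (not part of this file): the MIPS tree pairs
 * these stacks with a helper along these lines in asm/irq.h, used to
 * tell whether a given stack pointer lies on a CPU's IRQ stack. This
 * is a minimal sketch, not a verbatim copy.
 */
static inline bool on_irq_stack(int cpu, unsigned long sp)
{
	unsigned long low = (unsigned long)irq_stack[cpu];
	unsigned long high = low + IRQ_STACK_SIZE;

	return (low <= sp) && (sp <= high);
}
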
#ifdef CONFIG_DEBUG_STACKOVERFLOW
static inline void check_stack_overflow(void)
{
	unsigned long sp;

	__asm__ __volatile__("move %0, $sp" : "=r" (sp));
	sp &= THREAD_MASK;

	/*
	 * Check for stack overflow: is there less than STACK_WARN free?
	 * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
	 */
	if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
		printk("do_IRQ: stack overflow: %ld\n",
		       sp - sizeof(struct thread_info));
		dump_stack();
	}
}
#else
static inline void check_stack_overflow(void) {}
#endif

/*
 * do_IRQ handles all normal device IRQs (the special SMP cross-CPU
 * interrupts have their own specific handlers).
 */
void __irq_entry do_IRQ(unsigned int irq)
{
	irq_enter();
	check_stack_overflow();
	generic_handle_irq(irq);
	irq_exit();
}

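/*
 * For illustration only (not part of this file): a typical board-level
 * dispatcher funnels pending CPU interrupt lines into do_IRQ(). The
 * function name and the line assignments below are hypothetical; each
 * board defines its own plat_irq_dispatch() with its own mapping.
 */
asmlinkage void example_plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;

	if (pending & CAUSEF_IP7)
		do_IRQ(MIPS_CPU_IRQ_BASE + 7);	/* e.g. CPU timer */
	else if (pending & CAUSEF_IP2)
		do_IRQ(MIPS_CPU_IRQ_BASE + 2);	/* e.g. cascaded controller */
	else
		spurious_interrupt();
}
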
#ifdef CONFIG_IRQ_DOMAIN
void __irq_entry do_domain_IRQ(struct irq_domain *domain, unsigned int hwirq)
{
	irq_enter();
	check_stack_overflow();
	generic_handle_domain_irq(domain, hwirq);
	irq_exit();
}
#endif
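
/*
 * For illustration only (not part of this file): do_domain_IRQ() lets a
 * chained dispatcher pass a controller-local hwirq together with its
 * irq_domain and have the generic layer resolve the Linux IRQ number.
 * A minimal sketch; example_base, example_domain and EXAMPLE_PENDING
 * are hypothetical.
 */
static void example_domain_dispatch(void)
{
	unsigned long pending = readl(example_base + EXAMPLE_PENDING);
	unsigned int hwirq;

	for_each_set_bit(hwirq, &pending, 32)
		do_domain_IRQ(example_domain, hwirq);
}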