cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

irq.c (10424B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2000,2001,2002,2003,2004 Broadcom Corporation
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>

#include <asm/errno.h>
#include <asm/irq_regs.h>
#include <asm/signal.h>
#include <asm/io.h>

#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
#include <asm/sibyte/bcm1480_scd.h>

#include <asm/sibyte/sb1250_uart.h>
#include <asm/sibyte/sb1250.h>
/*
 * These are the routines that handle all the low level interrupt stuff.
 * Actions handled here are: initialization of the interrupt map, requesting of
 * interrupt lines by handlers, dispatching of interrupts to handlers, probing
 * for interrupt lines
 */

#ifdef CONFIG_PCI
extern unsigned long ht_eoi_space;
#endif
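
/*
 * ht_eoi_space is presumably set up by the HT/PCI bridge code; when
 * non-zero it points at the mapped HyperTransport EOI region that
 * ack_bcm1480_irq() writes to below.
 */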

/* Store the CPU id (not the logical number) */
int bcm1480_irq_owner[BCM1480_NR_IRQS];

static DEFINE_RAW_SPINLOCK(bcm1480_imr_lock);

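/*
 * Each CPU has two 64-bit interrupt mask registers: the first
 * BCM1480_NR_IRQS_HALF sources live in the _H register, the rest in
 * the _L register, BCM1480_IMR_HL_SPACING bytes above it.  The two
 * helpers below fold an IRQ number into the right register and bit.
 */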
void bcm1480_mask_irq(int cpu, int irq)
{
	unsigned long flags, hl_spacing;
	u64 cur_ints;

	raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);
	hl_spacing = 0;
	if ((irq >= BCM1480_NR_IRQS_HALF) && (irq <= BCM1480_NR_IRQS)) {
		hl_spacing = BCM1480_IMR_HL_SPACING;
		irq -= BCM1480_NR_IRQS_HALF;
	}
	cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
	cur_ints |= (((u64) 1) << irq);
	____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
	raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
}

void bcm1480_unmask_irq(int cpu, int irq)
{
	unsigned long flags, hl_spacing;
	u64 cur_ints;

	raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);
	hl_spacing = 0;
	if ((irq >= BCM1480_NR_IRQS_HALF) && (irq <= BCM1480_NR_IRQS)) {
		hl_spacing = BCM1480_IMR_HL_SPACING;
		irq -= BCM1480_NR_IRQS_HALF;
	}
	cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
	cur_ints &= ~(((u64) 1) << irq);
	____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
	raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
}

#ifdef CONFIG_SMP
static int bcm1480_set_affinity(struct irq_data *d, const struct cpumask *mask,
				bool force)
{
	unsigned int irq_dirty, irq = d->irq;
	int i = 0, old_cpu, cpu, int_on, k;
	u64 cur_ints;
	unsigned long flags;

	i = cpumask_first_and(mask, cpu_online_mask);

	/* Convert logical CPU to physical CPU */
	cpu = cpu_logical_map(i);

	/* Protect against other affinity changers and IMR manipulation */
	raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);

	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
	old_cpu = bcm1480_irq_owner[irq];
	irq_dirty = irq;
	if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
		irq_dirty -= BCM1480_NR_IRQS_HALF;
	}

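	/*
	 * Walk both mask halves: if the IRQ was enabled on the old
	 * owner, mask it there, record the new owner, then unmask it
	 * on the new CPU.
	 */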
	for (k=0; k<2; k++) { /* Loop through high and low interrupt mask registers */
		cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		int_on = !(cur_ints & (((u64) 1) << irq_dirty));
		if (int_on) {
			/* If it was on, mask it */
			cur_ints |= (((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
		bcm1480_irq_owner[irq] = cpu;
		if (int_on) {
			/* unmask for the new CPU */
			cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
			cur_ints &= ~(((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
	}
	raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);

	return 0;
}
#endif


/*****************************************************************************/

static void disable_bcm1480_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

	bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
}

static void enable_bcm1480_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

	bcm1480_unmask_irq(bcm1480_irq_owner[irq], irq);
}


static void ack_bcm1480_irq(struct irq_data *d)
{
	unsigned int irq_dirty, irq = d->irq;
	u64 pending;
	int k;

	/*
	 * If the interrupt was an HT interrupt, now is the time to
	 * clear it.  NOTE: we assume the HT bridge was set up to
	 * deliver the interrupts to all CPUs (which makes affinity
	 * changing easier for us)
	 */
	irq_dirty = irq;
	if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
		irq_dirty -= BCM1480_NR_IRQS_HALF;
	}
	for (k=0; k<2; k++) { /* Loop through high and low LDT interrupts */
		pending = __raw_readq(IOADDR(A_BCM1480_IMR_REGISTER(bcm1480_irq_owner[irq],
						R_BCM1480_IMR_LDT_INTERRUPT_H + (k*BCM1480_IMR_HL_SPACING))));
		pending &= ((u64)1 << (irq_dirty));
		if (pending) {
#ifdef CONFIG_SMP
			int i;
			for (i=0; i<NR_CPUS; i++) {
				/*
				 * Clear for all CPUs so an affinity switch
				 * doesn't find an old status
				 */
				__raw_writeq(pending, IOADDR(A_BCM1480_IMR_REGISTER(cpu_logical_map(i),
								R_BCM1480_IMR_LDT_INTERRUPT_CLR_H + (k*BCM1480_IMR_HL_SPACING))));
			}
#else
			__raw_writeq(pending, IOADDR(A_BCM1480_IMR_REGISTER(0, R_BCM1480_IMR_LDT_INTERRUPT_CLR_H + (k*BCM1480_IMR_HL_SPACING))));
#endif

			/*
			 * Generate EOI.  For Pass 1 parts, EOI is a nop.  For
			 * Pass 2, the LDT world may be edge-triggered, but
			 * this EOI shouldn't hurt.  If they are
			 * level-sensitive, the EOI is required.
			 */
#ifdef CONFIG_PCI
			if (ht_eoi_space)
				*(uint32_t *)(ht_eoi_space+(irq<<16)+(7<<2)) = 0;
#endif
		}
	}
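	/*
	 * Mask the source: this runs as the irq_mask_ack hook, so
	 * handle_level_irq() should unmask it again once the handler
	 * has completed.
	 */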
	bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
}

static struct irq_chip bcm1480_irq_type = {
	.name = "BCM1480-IMR",
	.irq_mask_ack = ack_bcm1480_irq,
	.irq_mask = disable_bcm1480_irq,
	.irq_unmask = enable_bcm1480_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity = bcm1480_set_affinity
#endif
};

void __init init_bcm1480_irqs(void)
{
	int i;

	for (i = 0; i < BCM1480_NR_IRQS; i++) {
		irq_set_chip_and_handler(i, &bcm1480_irq_type,
					 handle_level_irq);
		bcm1480_irq_owner[i] = 0;
	}
}

/*
 *  init_IRQ is called early in the boot sequence from init/main.c.  It
 *  is responsible for setting up the interrupt mapper and installing the
 *  handler that will be responsible for dispatching interrupts to the
 *  "right" place.
 */
/*
 * For now, map all interrupts to IP[2].  We could save
 * some cycles by parceling out system interrupts to different
 * IP lines, but keep it simple for bringup.  We'll also direct
 * all interrupts to a single CPU; we should probably route
 * PCI and LDT to one cpu and everything else to the other
 * to balance the load a bit.
 *
 * The one exception is the mailbox interrupt: it is mapped to
 * IP[3] on every CPU and left unmasked, so that cross-cpu
 * function calls work, as required by SMP.
 */

#define IMR_IP2_VAL	K_BCM1480_INT_MAP_I0
#define IMR_IP3_VAL	K_BCM1480_INT_MAP_I1
#define IMR_IP4_VAL	K_BCM1480_INT_MAP_I2
#define IMR_IP5_VAL	K_BCM1480_INT_MAP_I3
#define IMR_IP6_VAL	K_BCM1480_INT_MAP_I4

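/*
 * Mapper value In routes a source to CPU interrupt line IP[n+2]:
 * IP[0] and IP[1] are the MIPS software interrupts, so I0 is the
 * first hardware line, IP[2].
 */
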
void __init arch_init_irq(void)
{
	unsigned int i, cpu;
	u64 tmp;
	unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
		STATUSF_IP1 | STATUSF_IP0;

	/* Default everything to IP2 */
	/* Start with the _high registers, which have no bit 0 interrupt source */
	for (i = 1; i < BCM1480_NR_IRQS_HALF; i++) {	/* was I0 */
		for (cpu = 0; cpu < 4; cpu++) {
			__raw_writeq(IMR_IP2_VAL,
				     IOADDR(A_BCM1480_IMR_REGISTER(cpu,
								   R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) + (i << 3)));
		}
	}

	/* Now do _low registers */
	for (i = 0; i < BCM1480_NR_IRQS_HALF; i++) {
		for (cpu = 0; cpu < 4; cpu++) {
			__raw_writeq(IMR_IP2_VAL,
				     IOADDR(A_BCM1480_IMR_REGISTER(cpu,
								   R_BCM1480_IMR_INTERRUPT_MAP_BASE_L) + (i << 3)));
		}
	}

	init_bcm1480_irqs();

	/*
	 * Map the high 16 bits of the mailbox_0 registers to IP[3], for
	 * inter-cpu messages
	 */
	/* Was I1 */
	for (cpu = 0; cpu < 4; cpu++) {
		__raw_writeq(IMR_IP3_VAL, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) +
						 (K_BCM1480_INT_MBOX_0_0 << 3)));
	}

	/* Clear the mailboxes.  The firmware may leave them dirty */
	for (cpu = 0; cpu < 4; cpu++) {
		__raw_writeq(0xffffffffffffffffULL,
			     IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_0_CLR_CPU)));
		__raw_writeq(0xffffffffffffffffULL,
			     IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_1_CLR_CPU)));
	}

	/* Mask everything except the high 16 bits of the mailbox_0 registers for all cpus */
	tmp = ~((u64) 0) ^ (((u64) 1) << K_BCM1480_INT_MBOX_0_0);
	for (cpu = 0; cpu < 4; cpu++) {
		__raw_writeq(tmp, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MASK_H)));
	}
	tmp = ~((u64) 0);
	for (cpu = 0; cpu < 4; cpu++) {
		__raw_writeq(tmp, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MASK_L)));
	}

	/*
	 * Note that the timer interrupts are also mapped, but this is
	 * done in bcm1480_time_init().  Also, the profiling driver
	 * does its own management of IP7.
	 */

	/* Enable necessary IPs, disable the rest */
	change_c0_status(ST0_IM, imask);
}

extern void bcm1480_mailbox_interrupt(void);

static inline void dispatch_ip2(void)
{
	unsigned long long mask_h, mask_l;
	unsigned int cpu = smp_processor_id();
	unsigned long base;

	/*
	 * Default...we've hit an IP[2] interrupt, which means we've got to
	 * check the 1480 interrupt registers to figure out what to do.  Need
	 * to detect which CPU we're on, now that smp_affinity is supported.
	 */
	base = A_BCM1480_IMR_MAPPER(cpu);
	mask_h = __raw_readq(
		IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H));
	mask_l = __raw_readq(
		IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L));

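	/*
	 * Bit 0 of the high status word has no interrupt source of its
	 * own; it indicates that something in the low half is pending.
	 * So dispatch from mask_h unless bit 0 is the only bit set, in
	 * which case the real source is in mask_l.
	 */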
	if (mask_h) {
		if (mask_h ^ 1)
			do_IRQ(fls64(mask_h) - 1);
		else if (mask_l)
			do_IRQ(63 + fls64(mask_l));
	}
}

asmlinkage void plat_irq_dispatch(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int pending;

	pending = read_c0_cause() & read_c0_status();

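	/*
	 * Service in fixed priority order: the per-cpu timer on IP[4]
	 * first, then the SMP mailbox on IP[3], then everything routed
	 * through the interrupt mapper on IP[2].
	 */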
	if (pending & CAUSEF_IP4)
		do_IRQ(K_BCM1480_INT_TIMER_0 + cpu);
#ifdef CONFIG_SMP
	else if (pending & CAUSEF_IP3)
		bcm1480_mailbox_interrupt();
#endif
	else if (pending & CAUSEF_IP2)
		dispatch_ip2();
}