cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

i8259.c (7052B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * i8259 interrupt controller driver.
      4 */
      5#undef DEBUG
      6
      7#include <linux/ioport.h>
      8#include <linux/interrupt.h>
      9#include <linux/irqdomain.h>
     10#include <linux/kernel.h>
     11#include <linux/delay.h>
     12#include <asm/io.h>
     13#include <asm/i8259.h>
     14
/*
 * MMIO address of the PCI host bridge's interrupt-acknowledge register,
 * or NULL when the poll method must be used instead (see i8259_irq()).
 * RO: reading it returns the active irq vector.
 */
static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */

/*
 * Software copy of the two 8259 interrupt mask registers.
 * [0] shadows port 0xA1 (slave), [1] shadows port 0x21 (master).
 * All lines start masked (0xff) until i8259_init() programs the chips.
 */
static unsigned char cached_8259[2] = { 0xff, 0xff };
#define cached_A1 (cached_8259[0])
#define cached_21 (cached_8259[1])

/* Serializes all accesses to the 8259 ports and the cached masks. */
static DEFINE_RAW_SPINLOCK(i8259_lock);

/* irq_domain created by i8259_init(); exposed via i8259_get_host(). */
static struct irq_domain *i8259_host;
     24
     25/*
     26 * Acknowledge the IRQ using either the PCI host bridge's interrupt
     27 * acknowledge feature or poll.  How i8259_init() is called determines
     28 * which is called.  It should be noted that polling is broken on some
     29 * IBM and Motorola PReP boxes so we must use the int-ack feature on them.
     30 */
     31unsigned int i8259_irq(void)
     32{
     33	int irq;
     34	int lock = 0;
     35
     36	/* Either int-ack or poll for the IRQ */
     37	if (pci_intack)
     38		irq = readb(pci_intack);
     39	else {
     40		raw_spin_lock(&i8259_lock);
     41		lock = 1;
     42
     43		/* Perform an interrupt acknowledge cycle on controller 1. */
     44		outb(0x0C, 0x20);		/* prepare for poll */
     45		irq = inb(0x20) & 7;
     46		if (irq == 2 ) {
     47			/*
     48			 * Interrupt is cascaded so perform interrupt
     49			 * acknowledge on controller 2.
     50			 */
     51			outb(0x0C, 0xA0);	/* prepare for poll */
     52			irq = (inb(0xA0) & 7) + 8;
     53		}
     54	}
     55
     56	if (irq == 7) {
     57		/*
     58		 * This may be a spurious interrupt.
     59		 *
     60		 * Read the interrupt status register (ISR). If the most
     61		 * significant bit is not set then there is no valid
     62		 * interrupt.
     63		 */
     64		if (!pci_intack)
     65			outb(0x0B, 0x20);	/* ISR register */
     66		if(~inb(0x20) & 0x80)
     67			irq = 0;
     68	} else if (irq == 0xff)
     69		irq = 0;
     70
     71	if (lock)
     72		raw_spin_unlock(&i8259_lock);
     73	return irq;
     74}
     75
/*
 * Mask the given irq in the shadow mask, write the mask back to the
 * chip, then issue a non-specific EOI.  For slave irqs (>7) the EOI
 * must be sent to both the slave and the master (cascade) controller.
 * The dummy inb() after updating the cache inserts a bus delay before
 * the mask write; the whole sequence is order-critical, hence the
 * raw spinlock with interrupts disabled.
 */
static void i8259_mask_and_ack_irq(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259_lock, flags);
	if (d->irq > 7) {
		cached_A1 |= 1 << (d->irq-8);
		inb(0xA1); 	/* DUMMY */
		outb(cached_A1, 0xA1);
		outb(0x20, 0xA0);	/* Non-specific EOI */
		outb(0x20, 0x20);	/* Non-specific EOI to cascade */
	} else {
		cached_21 |= 1 << d->irq;
		inb(0x21); 	/* DUMMY */
		outb(cached_21, 0x21);
		outb(0x20, 0x20);	/* Non-specific EOI */
	}
	raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
     95
     96static void i8259_set_irq_mask(int irq_nr)
     97{
     98	outb(cached_A1,0xA1);
     99	outb(cached_21,0x21);
    100}
    101
    102static void i8259_mask_irq(struct irq_data *d)
    103{
    104	unsigned long flags;
    105
    106	pr_debug("i8259_mask_irq(%d)\n", d->irq);
    107
    108	raw_spin_lock_irqsave(&i8259_lock, flags);
    109	if (d->irq < 8)
    110		cached_21 |= 1 << d->irq;
    111	else
    112		cached_A1 |= 1 << (d->irq-8);
    113	i8259_set_irq_mask(d->irq);
    114	raw_spin_unlock_irqrestore(&i8259_lock, flags);
    115}
    116
    117static void i8259_unmask_irq(struct irq_data *d)
    118{
    119	unsigned long flags;
    120
    121	pr_debug("i8259_unmask_irq(%d)\n", d->irq);
    122
    123	raw_spin_lock_irqsave(&i8259_lock, flags);
    124	if (d->irq < 8)
    125		cached_21 &= ~(1 << d->irq);
    126	else
    127		cached_A1 &= ~(1 << (d->irq-8));
    128	i8259_set_irq_mask(d->irq);
    129	raw_spin_unlock_irqrestore(&i8259_lock, flags);
    130}
    131
/* irq_chip callbacks wired to the mask/unmask/ack helpers above. */
static struct irq_chip i8259_pic = {
	.name		= "i8259",
	.irq_mask	= i8259_mask_irq,
	.irq_disable	= i8259_mask_irq,
	.irq_unmask	= i8259_unmask_irq,
	.irq_mask_ack	= i8259_mask_and_ack_irq,
};
    139
/* I/O port ranges claimed by i8259_init(): master PIC at 0x20-0x21... */
static struct resource pic1_iores = {
	.name = "8259 (master)",
	.start = 0x20,
	.end = 0x21,
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/* ...slave PIC at 0xa0-0xa1... */
static struct resource pic2_iores = {
	.name = "8259 (slave)",
	.start = 0xa0,
	.end = 0xa1,
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/* ...and the ELCR edge/level control registers at 0x4d0-0x4d1. */
static struct resource pic_edgectrl_iores = {
	.name = "8259 edge control",
	.start = 0x4d0,
	.end = 0x4d1,
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
    160
    161static int i8259_host_match(struct irq_domain *h, struct device_node *node,
    162			    enum irq_domain_bus_token bus_token)
    163{
    164	struct device_node *of_node = irq_domain_get_of_node(h);
    165	return of_node == NULL || of_node == node;
    166}
    167
/*
 * Map callback: bind a virq to the i8259 irq_chip with the level-irq
 * flow handler.  hwirq 2 is the internal master/slave cascade and is
 * marked NOREQUEST so drivers cannot claim it.
 */
static int i8259_host_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw);

	/* We block the internal cascade */
	if (hw == 2)
		irq_set_status_flags(virq, IRQ_NOREQUEST);

	/* We use the level handler only for now, we might want to
	 * be more cautious here but that works for now
	 */
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &i8259_pic, handle_level_irq);
	return 0;
}
    184
    185static int i8259_host_xlate(struct irq_domain *h, struct device_node *ct,
    186			    const u32 *intspec, unsigned int intsize,
    187			    irq_hw_number_t *out_hwirq, unsigned int *out_flags)
    188{
    189	static unsigned char map_isa_senses[4] = {
    190		IRQ_TYPE_LEVEL_LOW,
    191		IRQ_TYPE_LEVEL_HIGH,
    192		IRQ_TYPE_EDGE_FALLING,
    193		IRQ_TYPE_EDGE_RISING,
    194	};
    195
    196	*out_hwirq = intspec[0];
    197	if (intsize > 1 && intspec[1] < 4)
    198		*out_flags = map_isa_senses[intspec[1]];
    199	else
    200		*out_flags = IRQ_TYPE_NONE;
    201
    202	return 0;
    203}
    204
/* irq_domain callbacks for the legacy i8259 host. */
static const struct irq_domain_ops i8259_host_ops = {
	.match = i8259_host_match,
	.map = i8259_host_map,
	.xlate = i8259_host_xlate,
};
    210
/*
 * Return the irq_domain created by i8259_init(), or NULL if
 * initialization has not run (or failed).  Init-time only.
 */
struct irq_domain *__init i8259_get_host(void)
{
	return i8259_host;
}
    215
    216/**
    217 * i8259_init - Initialize the legacy controller
    218 * @node: device node of the legacy PIC (can be NULL, but then, it will match
    219 *        all interrupts, so beware)
    220 * @intack_addr: PCI interrupt acknowledge (real) address which will return
    221 *             	 the active irq from the 8259
    222 */
    223void i8259_init(struct device_node *node, unsigned long intack_addr)
    224{
    225	unsigned long flags;
    226
    227	/* initialize the controller */
    228	raw_spin_lock_irqsave(&i8259_lock, flags);
    229
    230	/* Mask all first */
    231	outb(0xff, 0xA1);
    232	outb(0xff, 0x21);
    233
    234	/* init master interrupt controller */
    235	outb(0x11, 0x20); /* Start init sequence */
    236	outb(0x00, 0x21); /* Vector base */
    237	outb(0x04, 0x21); /* edge triggered, Cascade (slave) on IRQ2 */
    238	outb(0x01, 0x21); /* Select 8086 mode */
    239
    240	/* init slave interrupt controller */
    241	outb(0x11, 0xA0); /* Start init sequence */
    242	outb(0x08, 0xA1); /* Vector base */
    243	outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */
    244	outb(0x01, 0xA1); /* Select 8086 mode */
    245
    246	/* That thing is slow */
    247	udelay(100);
    248
    249	/* always read ISR */
    250	outb(0x0B, 0x20);
    251	outb(0x0B, 0xA0);
    252
    253	/* Unmask the internal cascade */
    254	cached_21 &= ~(1 << 2);
    255
    256	/* Set interrupt masks */
    257	outb(cached_A1, 0xA1);
    258	outb(cached_21, 0x21);
    259
    260	raw_spin_unlock_irqrestore(&i8259_lock, flags);
    261
    262	/* create a legacy host */
    263	i8259_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0,
    264					   &i8259_host_ops, NULL);
    265	if (i8259_host == NULL) {
    266		printk(KERN_ERR "i8259: failed to allocate irq host !\n");
    267		return;
    268	}
    269
    270	/* reserve our resources */
    271	/* XXX should we continue doing that ? it seems to cause problems
    272	 * with further requesting of PCI IO resources for that range...
    273	 * need to look into it.
    274	 */
    275	request_resource(&ioport_resource, &pic1_iores);
    276	request_resource(&ioport_resource, &pic2_iores);
    277	request_resource(&ioport_resource, &pic_edgectrl_iores);
    278
    279	if (intack_addr != 0)
    280		pci_intack = ioremap(intack_addr, 1);
    281
    282	printk(KERN_INFO "i8259 legacy interrupt controller initialized\n");
    283}