cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

arm_gicv3.c (13194B)


/*
 * ARM Generic Interrupt Controller v3
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

/* This file contains implementation code for an interrupt controller
 * which implements the GICv3 architecture. Specifically this is where
 * the device class itself and the functions for handling interrupts
 * coming in and going out live.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "hw/intc/arm_gicv3.h"
#include "gicv3_internal.h"

static bool irqbetter(GICv3CPUState *cs, int irq, uint8_t prio)
{
    /* Return true if this IRQ at this priority should take
     * precedence over the current recorded highest priority
     * pending interrupt for this CPU. We also return true if
     * the current recorded highest priority pending interrupt
     * is the same as this one (a property which the calling code
     * relies on).
     */
    if (prio < cs->hppi.prio) {
        return true;
    }
    /* If multiple pending interrupts have the same priority then it is an
     * IMPDEF choice which of them to signal to the CPU. We choose to
     * signal the one with the lowest interrupt number.
     */
    if (prio == cs->hppi.prio && irq <= cs->hppi.irq) {
        return true;
    }
    return false;
}
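
/* Illustration of the ordering above: GICv3 priority values are "lower is
 * more urgent", so an IRQ at priority 0x20 beats a recorded hppi at 0x80.
 * On an exact priority tie, say cs->hppi.irq == 40 and a new IRQ 27 arrives
 * at the same priority, irqbetter() also returns true, so the lower INTID
 * is the one that ends up being signalled.
 */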

static uint32_t gicd_int_pending(GICv3State *s, int irq)
{
    /* Recalculate which distributor interrupts are actually pending
     * in the group of 32 interrupts starting at irq (which should be a multiple
     * of 32), and return a 32-bit integer which has a bit set for each
     * interrupt that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is pending if:
     *  + the PENDING latch is set OR it is level triggered and the input is 1
     *  + its ENABLE bit is set
     *  + the GICD enable bit for its group is set
     *  + its ACTIVE bit is not set (otherwise it would be Active+Pending)
     * Conveniently we can bulk-calculate this with bitwise operations.
     */
    uint32_t pend, grpmask;
    uint32_t pending = *gic_bmp_ptr32(s->pending, irq);
    uint32_t edge_trigger = *gic_bmp_ptr32(s->edge_trigger, irq);
    uint32_t level = *gic_bmp_ptr32(s->level, irq);
    uint32_t group = *gic_bmp_ptr32(s->group, irq);
    uint32_t grpmod = *gic_bmp_ptr32(s->grpmod, irq);
    uint32_t enable = *gic_bmp_ptr32(s->enabled, irq);
    uint32_t active = *gic_bmp_ptr32(s->active, irq);

    pend = pending | (~edge_trigger & level);
    pend &= enable;
    pend &= ~active;

    if (s->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    }

    grpmask = 0;
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= group;
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~group & grpmod);
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~group & ~grpmod);
    }
    pend &= grpmask;

    return pend;
}
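
/* The per-bit group encoding filtered by grpmask above: a bit with
 * {grpmod, group} == {0, 0} is Group 0, {1, 0} is Secure Group 1, and any
 * bit with group == 1 is treated as Non-secure Group 1. For example, if
 * GICD_CTLR has only EN_GRP1NS set, only interrupts whose IGROUPR bit is 1
 * survive the mask; when the Disable Security (DS) bit is set, grpmod is
 * forced to zero so the group bit alone selects Group 0 versus Group 1.
 */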

static uint32_t gicr_int_pending(GICv3CPUState *cs)
{
    /* Recalculate which redistributor interrupts are actually pending,
     * and return a 32-bit integer which has a bit set for each interrupt
     * that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is pending if:
     *  + the PENDING latch is set OR it is level triggered and the input is 1
     *  + its ENABLE bit is set
     *  + the GICD enable bit for its group is set
     *  + its ACTIVE bit is not set (otherwise it would be Active+Pending)
     * Conveniently we can bulk-calculate this with bitwise operations.
     */
    uint32_t pend, grpmask, grpmod;

    pend = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
    pend &= cs->gicr_ienabler0;
    pend &= ~cs->gicr_iactiver0;

    if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    } else {
        grpmod = cs->gicr_igrpmodr0;
    }

    grpmask = 0;
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= cs->gicr_igroupr0;
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~cs->gicr_igroupr0 & grpmod);
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~cs->gicr_igroupr0 & ~grpmod);
    }
    pend &= grpmask;

    return pend;
}

/* Update the interrupt status after state in a redistributor
 * or CPU interface has changed, but don't tell the CPU i/f.
 */
static void gicv3_redist_update_noirqset(GICv3CPUState *cs)
{
    /* Find the highest priority pending interrupt among the
     * redistributor interrupts (SGIs and PPIs).
     */
    bool seenbetter = false;
    uint8_t prio;
    int i;
    uint32_t pend;

    /* Find out which redistributor interrupts are eligible to be
     * signaled to the CPU interface.
     */
    pend = gicr_int_pending(cs);

    if (pend) {
        for (i = 0; i < GIC_INTERNAL; i++) {
            if (!(pend & (1 << i))) {
                continue;
            }
            prio = cs->gicr_ipriorityr[i];
            if (irqbetter(cs, i, prio)) {
                cs->hppi.irq = i;
                cs->hppi.prio = prio;
                seenbetter = true;
            }
        }
    }

    if (seenbetter) {
        cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
    }

    if ((cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) && cs->gic->lpi_enable &&
        (cs->hpplpi.prio != 0xff)) {
        if (irqbetter(cs, cs->hpplpi.irq, cs->hpplpi.prio)) {
            cs->hppi.irq = cs->hpplpi.irq;
            cs->hppi.prio = cs->hpplpi.prio;
            cs->hppi.grp = cs->hpplpi.grp;
            seenbetter = true;
        }
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was no
     * previous pending interrupt at all), then that is still valid, and
     * we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
    if (!seenbetter && cs->hppi.prio != 0xff && cs->hppi.irq < GIC_INTERNAL) {
        gicv3_full_update_noirqset(cs->gic);
    }
}

/* Update the GIC status after state in a redistributor or
 * CPU interface has changed, and inform the CPU i/f of
 * its new highest priority pending interrupt.
 */
void gicv3_redist_update(GICv3CPUState *cs)
{
    gicv3_redist_update_noirqset(cs);
    gicv3_cpuif_update(cs);
}

/* Update the GIC status after state in the distributor has
 * changed affecting @len interrupts starting at @start,
 * but don't tell the CPU i/f.
 */
static void gicv3_update_noirqset(GICv3State *s, int start, int len)
{
    int i;
    uint8_t prio;
    uint32_t pend = 0;

    assert(start >= GIC_INTERNAL);
    assert(len > 0);

    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].seenbetter = false;
    }

    /* Find the highest priority pending interrupt in this range. */
    for (i = start; i < start + len; i++) {
        GICv3CPUState *cs;

        if (i == start || (i & 0x1f) == 0) {
            /* Calculate the next 32 bits worth of pending status */
            pend = gicd_int_pending(s, i & ~0x1f);
        }

        if (!(pend & (1 << (i & 0x1f)))) {
            continue;
        }
        cs = s->gicd_irouter_target[i];
        if (!cs) {
            /* Interrupts targeting no implemented CPU should remain pending
             * and not be forwarded to any CPU.
             */
            continue;
        }
        prio = s->gicd_ipriority[i];
        if (irqbetter(cs, i, prio)) {
            cs->hppi.irq = i;
            cs->hppi.prio = prio;
            cs->seenbetter = true;
        }
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was
     * no previous pending interrupt at all), then that
     * is still valid, and we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
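    /* A concrete case of the fall-back: suppose a CPU's hppi was SPI 80 at
     * priority 0x30 and this update was triggered (e.g. by a GICD_IPRIORITYR
     * write covering that SPI) after its priority value was raised to 0xa0.
     * Assuming no other pending interrupt is at 0x30 or better, nothing in
     * this pass looks "better" than the stale record, yet the record is now
     * wrong, so only a full rescan can find the real best interrupt.
     */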
    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        if (cs->seenbetter) {
            cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
        }

        if (!cs->seenbetter && cs->hppi.prio != 0xff &&
            cs->hppi.irq >= start && cs->hppi.irq < start + len) {
            gicv3_full_update_noirqset(s);
            break;
        }
    }
}

void gicv3_update(GICv3State *s, int start, int len)
{
    int i;

    gicv3_update_noirqset(s, start, len);
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_cpuif_update(&s->cpu[i]);
    }
}

void gicv3_full_update_noirqset(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, but
     * don't update any outbound IRQ lines.
     */
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].hppi.prio = 0xff;
    }

    /* Note that we can guarantee that these functions will not
     * recursively call back into gicv3_full_update(), because
     * at each point the "previous best" is always outside the
     * range we ask them to update.
     */
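    /* In detail: every hppi.prio was just reset to 0xff above, so the SPI
     * pass below either leaves a CPU's hppi empty or records an SPI; the
     * redistributor pass that follows can therefore never see a "previous
     * best" inside the SGI/PPI range and fall back into another full update.
     */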
    gicv3_update_noirqset(s, GIC_INTERNAL, s->num_irq - GIC_INTERNAL);

    for (i = 0; i < s->num_cpu; i++) {
        gicv3_redist_update_noirqset(&s->cpu[i]);
    }
}

void gicv3_full_update(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, including
     * updating outbound IRQ lines.
     */
    int i;

    gicv3_full_update_noirqset(s);
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_cpuif_update(&s->cpu[i]);
    }
}

/* Process a change in an external IRQ input. */
static void gicv3_set_irq(void *opaque, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     *  [0..N-1] : external interrupts
     *  [N..N+31] : PPI (internal) interrupts for CPU 0
     *  [N+32..N+63] : PPI (internal) interrupts for CPU 1
     *  ...
     */
    GICv3State *s = opaque;

    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* external interrupt (SPI) */
        gicv3_dist_set_irq(s, irq + GIC_INTERNAL, level);
    } else {
        /* per-cpu interrupt (PPI) */
        int cpu;

        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
        assert(cpu < s->num_cpu);
        /* Raising SGIs via this function would be a bug in how the board
         * model wires up interrupts.
         */
        assert(irq >= GIC_NR_SGIS);
        gicv3_redist_set_irq(&s->cpu[cpu], irq, level);
    }
}
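
/* Worked example of the wiring above, assuming a configuration with
 * s->num_irq == 256 (so N == 224 external inputs): board input 0 raises
 * SPI INTID 32 and input 223 raises INTID 255, while input 224 + 27
 * targets PPI 27 on CPU 0 and input 224 + 32 + 27 targets the same PPI on
 * CPU 1. Inputs that would decode to a per-CPU index below GIC_NR_SGIS
 * trip the assert, because SGIs are generated by software through the
 * ICC_SGI1R/ICC_SGI0R registers rather than by board wiring.
 */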

static void arm_gicv3_post_load(GICv3State *s)
{
    int i;
    /* Recalculate our cached idea of the current highest priority
     * pending interrupt, but don't set IRQ or FIQ lines.
     */
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_redist_update_lpi(&s->cpu[i]);
    }
    gicv3_full_update_noirqset(s);
    /* Repopulate the cache of GICv3CPUState pointers for target CPUs */
    gicv3_cache_all_target_cpustates(s);
}

static const MemoryRegionOps gic_ops[] = {
    {
        .read_with_attrs = gicv3_dist_read,
        .write_with_attrs = gicv3_dist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
    },
    {
        .read_with_attrs = gicv3_redist_read,
        .write_with_attrs = gicv3_redist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
    }
};

static void arm_gic_realize(DeviceState *dev, Error **errp)
{
    /* Device instance realize function for the GIC sysbus device */
    GICv3State *s = ARM_GICV3(dev);
    ARMGICv3Class *agc = ARM_GICV3_GET_CLASS(s);
    Error *local_err = NULL;

    agc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->nb_redist_regions != 1) {
        error_setg(errp, "VGICv3 redist region number(%d) not equal to 1",
                   s->nb_redist_regions);
        return;
    }

    gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    gicv3_init_cpuif(s);
}

static void arm_gicv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass);
    ARMGICv3Class *agc = ARM_GICV3_CLASS(klass);

    agcc->post_load = arm_gicv3_post_load;
    device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize);
}

static const TypeInfo arm_gicv3_info = {
    .name = TYPE_ARM_GICV3,
    .parent = TYPE_ARM_GICV3_COMMON,
    .instance_size = sizeof(GICv3State),
    .class_init = arm_gicv3_class_init,
    .class_size = sizeof(ARMGICv3Class),
};

static void arm_gicv3_register_types(void)
{
    type_register_static(&arm_gicv3_info);
}

type_init(arm_gicv3_register_types)