cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

arm_gicv3_cpuif.c (85062B)


      1/*
      2 * ARM Generic Interrupt Controller v3
      3 *
      4 * Copyright (c) 2016 Linaro Limited
      5 * Written by Peter Maydell
      6 *
      7 * This code is licensed under the GPL, version 2 or (at your option)
      8 * any later version.
      9 */
     10
     11/* This file contains the code for the system register interface
     12 * portions of the GICv3.
     13 */
     14
     15#include "qemu/osdep.h"
     16#include "qemu/bitops.h"
     17#include "qemu/log.h"
     18#include "qemu/main-loop.h"
     19#include "trace.h"
     20#include "gicv3_internal.h"
     21#include "hw/irq.h"
     22#include "cpu.h"
     23
     24void gicv3_set_gicv3state(CPUState *cpu, GICv3CPUState *s)
     25{
     26    ARMCPU *arm_cpu = ARM_CPU(cpu);
     27    CPUARMState *env = &arm_cpu->env;
     28
     29    env->gicv3state = (void *)s;
      30}
     31
     32static GICv3CPUState *icc_cs_from_env(CPUARMState *env)
     33{
     34    return env->gicv3state;
     35}
     36
     37static bool gicv3_use_ns_bank(CPUARMState *env)
     38{
     39    /* Return true if we should use the NonSecure bank for a banked GIC
     40     * CPU interface register. Note that this differs from the
     41     * access_secure_reg() function because GICv3 banked registers are
     42     * banked even for AArch64, unlike the other CPU system registers.
     43     */
     44    return !arm_is_secure_below_el3(env);
     45}
     46
     47/* The minimum BPR for the virtual interface is a configurable property */
     48static inline int icv_min_vbpr(GICv3CPUState *cs)
     49{
     50    return 7 - cs->vprebits;
     51}
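        /* For example, with cs->vprebits == 5 the lowest programmable VBPR0
         * value is 2; write_vbpr() below raises the minimum by one for
         * group 1, so the lowest VBPR1 value is then 3.
         */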
     52
     53/* Simple accessor functions for LR fields */
     54static uint32_t ich_lr_vintid(uint64_t lr)
     55{
     56    return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH);
     57}
     58
     59static uint32_t ich_lr_pintid(uint64_t lr)
     60{
     61    return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH);
     62}
     63
     64static uint32_t ich_lr_prio(uint64_t lr)
     65{
     66    return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH);
     67}
     68
     69static int ich_lr_state(uint64_t lr)
     70{
     71    return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH);
     72}
     73
     74static bool icv_access(CPUARMState *env, int hcr_flags)
     75{
     76    /* Return true if this ICC_ register access should really be
     77     * directed to an ICV_ access. hcr_flags is a mask of
     78     * HCR_EL2 bits to check: we treat this as an ICV_ access
     79     * if we are in NS EL1 and at least one of the specified
     80     * HCR_EL2 bits is set.
     81     *
     82     * ICV registers fall into four categories:
     83     *  * access if NS EL1 and HCR_EL2.FMO == 1:
     84     *    all ICV regs with '0' in their name
     85     *  * access if NS EL1 and HCR_EL2.IMO == 1:
     86     *    all ICV regs with '1' in their name
     87     *  * access if NS EL1 and either IMO or FMO == 1:
     88     *    CTLR, DIR, PMR, RPR
     89     */
     90    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
     91    bool flagmatch = hcr_el2 & hcr_flags & (HCR_IMO | HCR_FMO);
     92
     93    return flagmatch && arm_current_el(env) == 1
     94        && !arm_is_secure_below_el3(env);
     95}
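        /* For example, a Non-secure EL1 read of ICC_IAR1_EL1 with HCR_EL2.IMO
         * set is handled as a read of ICV_IAR1_EL1; this is why icc_iar1_read()
         * below begins with an icv_access(env, HCR_IMO) check.
         */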
     96
     97static int read_vbpr(GICv3CPUState *cs, int grp)
     98{
     99    /* Read VBPR value out of the VMCR field (caller must handle
    100     * VCBPR effects if required)
    101     */
    102    if (grp == GICV3_G0) {
    103        return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
     104                         ICH_VMCR_EL2_VBPR0_LENGTH);
    105    } else {
    106        return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
    107                         ICH_VMCR_EL2_VBPR1_LENGTH);
    108    }
    109}
    110
    111static void write_vbpr(GICv3CPUState *cs, int grp, int value)
    112{
     113    /* Write a new VBPR0 or VBPR1 value, handling the "writing a value
     114     * less than the minimum sets it to the minimum" semantics.
     115     */
    116    int min = icv_min_vbpr(cs);
    117
    118    if (grp != GICV3_G0) {
    119        min++;
    120    }
    121
    122    value = MAX(value, min);
    123
    124    if (grp == GICV3_G0) {
    125        cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
    126                                     ICH_VMCR_EL2_VBPR0_LENGTH, value);
    127    } else {
    128        cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
    129                                     ICH_VMCR_EL2_VBPR1_LENGTH, value);
    130    }
    131}
    132
    133static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
    134{
    135    /* Return a mask word which clears the unimplemented priority bits
    136     * from a priority value for a virtual interrupt. (Not to be confused
    137     * with the group priority, whose mask depends on the value of VBPR
    138     * for the interrupt group.)
    139     */
    140    return ~0U << (8 - cs->vpribits);
    141}
    142
    143static int ich_highest_active_virt_prio(GICv3CPUState *cs)
    144{
    145    /* Calculate the current running priority based on the set bits
    146     * in the ICH Active Priority Registers.
    147     */
    148    int i;
    149    int aprmax = 1 << (cs->vprebits - 5);
    150
    151    assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
    152
    153    for (i = 0; i < aprmax; i++) {
    154        uint32_t apr = cs->ich_apr[GICV3_G0][i] |
    155            cs->ich_apr[GICV3_G1NS][i];
    156
    157        if (!apr) {
    158            continue;
    159        }
    160        return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1);
    161    }
    162    /* No current active interrupts: return idle priority */
    163    return 0xff;
    164}
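        /* Each set bit n of active-priority word i corresponds to a running
         * priority of (i * 32 + n) << (icv_min_vbpr() + 1); e.g. with
         * vprebits == 5, bit 1 of ICH_AP0R0_EL2 or ICH_AP1R0_EL2 means the
         * running priority is 8.
         */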
    165
    166static int hppvi_index(GICv3CPUState *cs)
    167{
    168    /* Return the list register index of the highest priority pending
    169     * virtual interrupt, as per the HighestPriorityVirtualInterrupt
    170     * pseudocode. If no pending virtual interrupts, return -1.
    171     */
    172    int idx = -1;
    173    int i;
    174    /* Note that a list register entry with a priority of 0xff will
    175     * never be reported by this function; this is the architecturally
    176     * correct behaviour.
    177     */
    178    int prio = 0xff;
    179
    180    if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
    181        /* Both groups disabled, definitely nothing to do */
    182        return idx;
    183    }
    184
    185    for (i = 0; i < cs->num_list_regs; i++) {
    186        uint64_t lr = cs->ich_lr_el2[i];
    187        int thisprio;
    188
    189        if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) {
    190            /* Not Pending */
    191            continue;
    192        }
    193
    194        /* Ignore interrupts if relevant group enable not set */
    195        if (lr & ICH_LR_EL2_GROUP) {
    196            if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
    197                continue;
    198            }
    199        } else {
    200            if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
    201                continue;
    202            }
    203        }
    204
    205        thisprio = ich_lr_prio(lr);
    206
    207        if (thisprio < prio) {
    208            prio = thisprio;
    209            idx = i;
    210        }
    211    }
    212
    213    return idx;
    214}
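        /* If two list registers hold pending interrupts of the same priority,
         * the strict '<' comparison above means the lower-numbered register
         * wins.
         */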
    215
    216static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group)
    217{
    218    /* Return a mask word which clears the subpriority bits from
    219     * a priority value for a virtual interrupt in the specified group.
    220     * This depends on the VBPR value.
    221     * If using VBPR0 then:
    222     *  a BPR of 0 means the group priority bits are [7:1];
    223     *  a BPR of 1 means they are [7:2], and so on down to
    224     *  a BPR of 7 meaning no group priority bits at all.
    225     * If using VBPR1 then:
    226     *  a BPR of 0 is impossible (the minimum value is 1)
    227     *  a BPR of 1 means the group priority bits are [7:1];
    228     *  a BPR of 2 means they are [7:2], and so on down to
    229     *  a BPR of 7 meaning the group priority is [7].
    230     *
    231     * Which BPR to use depends on the group of the interrupt and
    232     * the current ICH_VMCR_EL2.VCBPR settings.
    233     *
    234     * This corresponds to the VGroupBits() pseudocode.
    235     */
    236    int bpr;
    237
    238    if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
    239        group = GICV3_G0;
    240    }
    241
    242    bpr = read_vbpr(cs, group);
    243    if (group == GICV3_G1NS) {
    244        assert(bpr > 0);
    245        bpr--;
    246    }
    247
    248    return ~0U << (bpr + 1);
    249}
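        /* e.g. a VBPR0 of 2 yields ~0U << 3, so only priority bits [7:3]
         * count as group priority when comparing virtual interrupts.
         */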
    250
    251static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
    252{
    253    /* Return true if we can signal this virtual interrupt defined by
    254     * the given list register value; see the pseudocode functions
    255     * CanSignalVirtualInterrupt and CanSignalVirtualInt.
    256     * Compare also icc_hppi_can_preempt() which is the non-virtual
    257     * equivalent of these checks.
    258     */
    259    int grp;
    260    uint32_t mask, prio, rprio, vpmr;
    261
    262    if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
    263        /* Virtual interface disabled */
    264        return false;
    265    }
    266
    267    /* We don't need to check that this LR is in Pending state because
    268     * that has already been done in hppvi_index().
    269     */
    270
    271    prio = ich_lr_prio(lr);
    272    vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
    273                     ICH_VMCR_EL2_VPMR_LENGTH);
    274
    275    if (prio >= vpmr) {
    276        /* Priority mask masks this interrupt */
    277        return false;
    278    }
    279
    280    rprio = ich_highest_active_virt_prio(cs);
    281    if (rprio == 0xff) {
    282        /* No running interrupt so we can preempt */
    283        return true;
    284    }
    285
    286    grp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
    287
    288    mask = icv_gprio_mask(cs, grp);
    289
    290    /* We only preempt a running interrupt if the pending interrupt's
    291     * group priority is sufficient (the subpriorities are not considered).
    292     */
    293    if ((prio & mask) < (rprio & mask)) {
    294        return true;
    295    }
    296
    297    return false;
    298}
    299
    300static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
    301                                                uint32_t *misr)
    302{
    303    /* Return a set of bits indicating the EOI maintenance interrupt status
    304     * for each list register. The EOI maintenance interrupt status is
    305     * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1
    306     * (see the GICv3 spec for the ICH_EISR_EL2 register).
    307     * If misr is not NULL then we should also collect the information
    308     * about the MISR.EOI, MISR.NP and MISR.U bits.
    309     */
    310    uint32_t value = 0;
    311    int validcount = 0;
    312    bool seenpending = false;
    313    int i;
    314
    315    for (i = 0; i < cs->num_list_regs; i++) {
    316        uint64_t lr = cs->ich_lr_el2[i];
    317
    318        if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI))
    319            == ICH_LR_EL2_EOI) {
    320            value |= (1 << i);
    321        }
    322        if ((lr & ICH_LR_EL2_STATE_MASK)) {
    323            validcount++;
    324        }
    325        if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) {
    326            seenpending = true;
    327        }
    328    }
    329
    330    if (misr) {
    331        if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) {
    332            *misr |= ICH_MISR_EL2_U;
    333        }
    334        if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) {
    335            *misr |= ICH_MISR_EL2_NP;
    336        }
    337        if (value) {
    338            *misr |= ICH_MISR_EL2_EOI;
    339        }
    340    }
    341    return value;
    342}
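        /* validcount counts list registers in any non-invalid state, so the
         * MISR.U (underflow) bit is only reported when at most one valid
         * entry remains and ICH_HCR_EL2.UIE is set.
         */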
    343
    344static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
    345{
    346    /* Return a set of bits indicating the maintenance interrupt status
    347     * (as seen in the ICH_MISR_EL2 register).
    348     */
    349    uint32_t value = 0;
    350
    351    /* Scan list registers and fill in the U, NP and EOI bits */
    352    eoi_maintenance_interrupt_state(cs, &value);
    353
    354    if (cs->ich_hcr_el2 & (ICH_HCR_EL2_LRENPIE | ICH_HCR_EL2_EOICOUNT_MASK)) {
    355        value |= ICH_MISR_EL2_LRENP;
    356    }
    357
    358    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) &&
    359        (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
    360        value |= ICH_MISR_EL2_VGRP0E;
    361    }
    362
    363    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) &&
     364        !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
    365        value |= ICH_MISR_EL2_VGRP0D;
    366    }
    367    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) &&
    368        (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
    369        value |= ICH_MISR_EL2_VGRP1E;
    370    }
    371
    372    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) &&
    373        !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
    374        value |= ICH_MISR_EL2_VGRP1D;
    375    }
    376
    377    return value;
    378}
    379
    380static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
    381{
    382    /* Tell the CPU about any pending virtual interrupts or
    383     * maintenance interrupts, following a change to the state
    384     * of the CPU interface relevant to virtual interrupts.
    385     *
    386     * CAUTION: this function will call qemu_set_irq() on the
    387     * CPU maintenance IRQ line, which is typically wired up
    388     * to the GIC as a per-CPU interrupt. This means that it
    389     * will recursively call back into the GIC code via
    390     * gicv3_redist_set_irq() and thus into the CPU interface code's
    391     * gicv3_cpuif_update(). It is therefore important that this
    392     * function is only called as the final action of a CPU interface
    393     * register write implementation, after all the GIC state
    394     * fields have been updated. gicv3_cpuif_update() also must
    395     * not cause this function to be called, but that happens
    396     * naturally as a result of there being no architectural
    397     * linkage between the physical and virtual GIC logic.
    398     */
    399    int idx;
    400    int irqlevel = 0;
    401    int fiqlevel = 0;
    402    int maintlevel = 0;
    403    ARMCPU *cpu = ARM_CPU(cs->cpu);
    404
    405    idx = hppvi_index(cs);
    406    trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx);
    407    if (idx >= 0) {
    408        uint64_t lr = cs->ich_lr_el2[idx];
    409
    410        if (icv_hppi_can_preempt(cs, lr)) {
    411            /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */
    412            if (lr & ICH_LR_EL2_GROUP) {
    413                irqlevel = 1;
    414            } else {
    415                fiqlevel = 1;
    416            }
    417        }
    418    }
    419
    420    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) &&
    421        maintenance_interrupt_state(cs) != 0) {
    422        maintlevel = 1;
    423    }
    424
    425    trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel,
    426                                    irqlevel, maintlevel);
    427
    428    qemu_set_irq(cs->parent_vfiq, fiqlevel);
    429    qemu_set_irq(cs->parent_virq, irqlevel);
    430    qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel);
    431}
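        /* Note the mapping above: a group 1 list register entry drives the
         * virtual IRQ line, a group 0 entry drives the virtual FIQ line, and
         * the maintenance interrupt line follows the current ICH_MISR_EL2
         * state whenever ICH_HCR_EL2.En is set.
         */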
    432
    433static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
    434{
    435    GICv3CPUState *cs = icc_cs_from_env(env);
    436    int regno = ri->opc2 & 3;
    437    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
    438    uint64_t value = cs->ich_apr[grp][regno];
    439
    440    trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
    441    return value;
    442}
    443
    444static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
    445                         uint64_t value)
    446{
    447    GICv3CPUState *cs = icc_cs_from_env(env);
    448    int regno = ri->opc2 & 3;
    449    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
    450
    451    trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
    452
    453    cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
    454
    455    gicv3_cpuif_virt_update(cs);
    456    return;
    457}
    458
    459static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
    460{
    461    GICv3CPUState *cs = icc_cs_from_env(env);
    462    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
    463    uint64_t bpr;
    464    bool satinc = false;
    465
    466    if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
    467        /* reads return bpr0 + 1 saturated to 7, writes ignored */
    468        grp = GICV3_G0;
    469        satinc = true;
    470    }
    471
    472    bpr = read_vbpr(cs, grp);
    473
    474    if (satinc) {
    475        bpr++;
    476        bpr = MIN(bpr, 7);
    477    }
    478
    479    trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
    480
    481    return bpr;
    482}
    483
    484static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    485                          uint64_t value)
    486{
    487    GICv3CPUState *cs = icc_cs_from_env(env);
    488    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
    489
    490    trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
    491                              gicv3_redist_affid(cs), value);
    492
    493    if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
    494        /* reads return bpr0 + 1 saturated to 7, writes ignored */
    495        return;
    496    }
    497
    498    write_vbpr(cs, grp, value);
    499
    500    gicv3_cpuif_virt_update(cs);
    501}
    502
    503static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
    504{
    505    GICv3CPUState *cs = icc_cs_from_env(env);
    506    uint64_t value;
    507
    508    value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
    509                      ICH_VMCR_EL2_VPMR_LENGTH);
    510
    511    trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
    512    return value;
    513}
    514
    515static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    516                          uint64_t value)
    517{
    518    GICv3CPUState *cs = icc_cs_from_env(env);
    519
    520    trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);
    521
    522    value &= icv_fullprio_mask(cs);
    523
    524    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
    525                                 ICH_VMCR_EL2_VPMR_LENGTH, value);
    526
    527    gicv3_cpuif_virt_update(cs);
    528}
    529
    530static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
    531{
    532    GICv3CPUState *cs = icc_cs_from_env(env);
    533    int enbit;
    534    uint64_t value;
    535
    536    enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
    537    value = extract64(cs->ich_vmcr_el2, enbit, 1);
    538
    539    trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
    540                                gicv3_redist_affid(cs), value);
    541    return value;
    542}
    543
    544static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
    545                             uint64_t value)
    546{
    547    GICv3CPUState *cs = icc_cs_from_env(env);
    548    int enbit;
    549
    550    trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0,
    551                                 gicv3_redist_affid(cs), value);
    552
    553    enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
    554
    555    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value);
    556    gicv3_cpuif_virt_update(cs);
    557}
    558
    559static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
    560{
    561    GICv3CPUState *cs = icc_cs_from_env(env);
    562    uint64_t value;
    563
    564    /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits)
    565     * should match the ones reported in ich_vtr_read().
    566     */
    567    value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
    568        (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
    569
    570    if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
    571        value |= ICC_CTLR_EL1_EOIMODE;
    572    }
    573
    574    if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
    575        value |= ICC_CTLR_EL1_CBPR;
    576    }
    577
    578    trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
    579    return value;
    580}
    581
    582static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    583                               uint64_t value)
    584{
    585    GICv3CPUState *cs = icc_cs_from_env(env);
    586
    587    trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);
    588
    589    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT,
    590                                 1, value & ICC_CTLR_EL1_CBPR ? 1 : 0);
    591    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
    592                                 1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);
    593
    594    gicv3_cpuif_virt_update(cs);
    595}
    596
    597static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
    598{
    599    GICv3CPUState *cs = icc_cs_from_env(env);
    600    int prio = ich_highest_active_virt_prio(cs);
    601
    602    trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
    603    return prio;
    604}
    605
    606static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri)
    607{
    608    GICv3CPUState *cs = icc_cs_from_env(env);
    609    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    610    int idx = hppvi_index(cs);
    611    uint64_t value = INTID_SPURIOUS;
    612
    613    if (idx >= 0) {
    614        uint64_t lr = cs->ich_lr_el2[idx];
    615        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
    616
    617        if (grp == thisgrp) {
    618            value = ich_lr_vintid(lr);
    619        }
    620    }
    621
    622    trace_gicv3_icv_hppir_read(grp, gicv3_redist_affid(cs), value);
    623    return value;
    624}
    625
    626static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
    627{
    628    /* Activate the interrupt in the specified list register
    629     * by moving it from Pending to Active state, and update the
    630     * Active Priority Registers.
    631     */
    632    uint32_t mask = icv_gprio_mask(cs, grp);
    633    int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask;
    634    int aprbit = prio >> (8 - cs->vprebits);
    635    int regno = aprbit / 32;
    636    int regbit = aprbit % 32;
    637
    638    cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
    639    cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT;
    640    cs->ich_apr[grp][regno] |= (1 << regbit);
    641}
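        /* e.g. with vprebits == 5, a masked group priority of 0x48 gives
         * aprbit 0x48 >> 3 == 9, i.e. regno 0 and regbit 9 of the group's
         * first APR word.
         */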
    642
    643static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
    644{
    645    GICv3CPUState *cs = icc_cs_from_env(env);
    646    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    647    int idx = hppvi_index(cs);
    648    uint64_t intid = INTID_SPURIOUS;
    649
    650    if (idx >= 0) {
    651        uint64_t lr = cs->ich_lr_el2[idx];
    652        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
    653
    654        if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) {
    655            intid = ich_lr_vintid(lr);
    656            if (intid < INTID_SECURE) {
    657                icv_activate_irq(cs, idx, grp);
    658            } else {
    659                /* Interrupt goes from Pending to Invalid */
    660                cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
    661                /* We will now return the (bogus) ID from the list register,
    662                 * as per the pseudocode.
    663                 */
    664            }
    665        }
    666    }
    667
    668    trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1,
    669                             gicv3_redist_affid(cs), intid);
    670
    671    gicv3_cpuif_virt_update(cs);
    672
    673    return intid;
    674}
    675
    676static int icc_highest_active_prio(GICv3CPUState *cs)
    677{
    678    /* Calculate the current running priority based on the set bits
    679     * in the Active Priority Registers.
    680     */
    681    int i;
    682
    683    for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
    684        uint32_t apr = cs->icc_apr[GICV3_G0][i] |
    685            cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];
    686
    687        if (!apr) {
    688            continue;
    689        }
    690        return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1);
    691    }
    692    /* No current active interrupts: return idle priority */
    693    return 0xff;
    694}
    695
    696static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group)
    697{
    698    /* Return a mask word which clears the subpriority bits from
    699     * a priority value for an interrupt in the specified group.
    700     * This depends on the BPR value. For CBPR0 (S or NS):
    701     *  a BPR of 0 means the group priority bits are [7:1];
    702     *  a BPR of 1 means they are [7:2], and so on down to
    703     *  a BPR of 7 meaning no group priority bits at all.
    704     * For CBPR1 NS:
    705     *  a BPR of 0 is impossible (the minimum value is 1)
    706     *  a BPR of 1 means the group priority bits are [7:1];
    707     *  a BPR of 2 means they are [7:2], and so on down to
    708     *  a BPR of 7 meaning the group priority is [7].
    709     *
    710     * Which BPR to use depends on the group of the interrupt and
    711     * the current ICC_CTLR.CBPR settings.
    712     *
    713     * This corresponds to the GroupBits() pseudocode.
    714     */
    715    int bpr;
    716
    717    if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) ||
    718        (group == GICV3_G1NS &&
    719         cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
    720        group = GICV3_G0;
    721    }
    722
    723    bpr = cs->icc_bpr[group] & 7;
    724
    725    if (group == GICV3_G1NS) {
    726        assert(bpr > 0);
    727        bpr--;
    728    }
    729
    730    return ~0U << (bpr + 1);
    731}
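        /* e.g. a BPR of 3 yields ~0U << 4, so bits [7:4] of the priority form
         * the group priority and bits [3:0] are subpriority.
         */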
    732
    733static bool icc_no_enabled_hppi(GICv3CPUState *cs)
    734{
    735    /* Return true if there is no pending interrupt, or the
    736     * highest priority pending interrupt is in a group which has been
    737     * disabled at the CPU interface by the ICC_IGRPEN* register enable bits.
    738     */
    739    return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0);
    740}
    741
    742static bool icc_hppi_can_preempt(GICv3CPUState *cs)
    743{
    744    /* Return true if we have a pending interrupt of sufficient
    745     * priority to preempt.
    746     */
    747    int rprio;
    748    uint32_t mask;
    749
    750    if (icc_no_enabled_hppi(cs)) {
    751        return false;
    752    }
    753
    754    if (cs->hppi.prio >= cs->icc_pmr_el1) {
    755        /* Priority mask masks this interrupt */
    756        return false;
    757    }
    758
    759    rprio = icc_highest_active_prio(cs);
    760    if (rprio == 0xff) {
    761        /* No currently running interrupt so we can preempt */
    762        return true;
    763    }
    764
    765    mask = icc_gprio_mask(cs, cs->hppi.grp);
    766
    767    /* We only preempt a running interrupt if the pending interrupt's
    768     * group priority is sufficient (the subpriorities are not considered).
    769     */
    770    if ((cs->hppi.prio & mask) < (rprio & mask)) {
    771        return true;
    772    }
    773
    774    return false;
    775}
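        /* Both comparisons above are strict: a pending priority equal to the
         * PMR is masked, and a pending group priority equal to the running
         * group priority does not preempt.
         */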
    776
    777void gicv3_cpuif_update(GICv3CPUState *cs)
    778{
    779    /* Tell the CPU about its highest priority pending interrupt */
    780    int irqlevel = 0;
    781    int fiqlevel = 0;
    782    ARMCPU *cpu = ARM_CPU(cs->cpu);
    783    CPUARMState *env = &cpu->env;
    784
    785    g_assert(qemu_mutex_iothread_locked());
    786
    787    trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
    788                             cs->hppi.grp, cs->hppi.prio);
    789
    790    if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) {
    791        /* If a Security-enabled GIC sends a G1S interrupt to a
    792         * Security-disabled CPU, we must treat it as if it were G0.
    793         */
    794        cs->hppi.grp = GICV3_G0;
    795    }
    796
    797    if (icc_hppi_can_preempt(cs)) {
    798        /* We have an interrupt: should we signal it as IRQ or FIQ?
    799         * This is described in the GICv3 spec section 4.6.2.
    800         */
    801        bool isfiq;
    802
    803        switch (cs->hppi.grp) {
    804        case GICV3_G0:
    805            isfiq = true;
    806            break;
    807        case GICV3_G1:
    808            isfiq = (!arm_is_secure(env) ||
    809                     (arm_current_el(env) == 3 && arm_el_is_aa64(env, 3)));
    810            break;
    811        case GICV3_G1NS:
    812            isfiq = arm_is_secure(env);
    813            break;
    814        default:
    815            g_assert_not_reached();
    816        }
    817
    818        if (isfiq) {
    819            fiqlevel = 1;
    820        } else {
    821            irqlevel = 1;
    822        }
    823    }
    824
    825    trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
    826
    827    qemu_set_irq(cs->parent_fiq, fiqlevel);
    828    qemu_set_irq(cs->parent_irq, irqlevel);
    829}
    830
    831static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
    832{
    833    GICv3CPUState *cs = icc_cs_from_env(env);
    834    uint32_t value = cs->icc_pmr_el1;
    835
    836    if (icv_access(env, HCR_FMO | HCR_IMO)) {
    837        return icv_pmr_read(env, ri);
    838    }
    839
    840    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
    841        (env->cp15.scr_el3 & SCR_FIQ)) {
    842        /* NS access and Group 0 is inaccessible to NS: return the
    843         * NS view of the current priority
    844         */
    845        if ((value & 0x80) == 0) {
    846            /* Secure priorities not visible to NS */
    847            value = 0;
    848        } else if (value != 0xff) {
    849            value = (value << 1) & 0xff;
    850        }
    851    }
    852
    853    trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value);
    854
    855    return value;
    856}
    857
    858static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    859                          uint64_t value)
    860{
    861    GICv3CPUState *cs = icc_cs_from_env(env);
    862
    863    if (icv_access(env, HCR_FMO | HCR_IMO)) {
    864        return icv_pmr_write(env, ri, value);
    865    }
    866
    867    trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);
    868
    869    value &= 0xff;
    870
    871    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
    872        (env->cp15.scr_el3 & SCR_FIQ)) {
     873        /* NS access and Group 0 is inaccessible to NS: map the value
     874         * being written into the NS half of the priority range
     875         */
    876        if (!(cs->icc_pmr_el1 & 0x80)) {
    877            /* Current PMR in the secure range, don't allow NS to change it */
    878            return;
    879        }
    880        value = (value >> 1) | 0x80;
    881    }
    882    cs->icc_pmr_el1 = value;
    883    gicv3_cpuif_update(cs);
    884}
    885
    886static void icc_activate_irq(GICv3CPUState *cs, int irq)
    887{
    888    /* Move the interrupt from the Pending state to Active, and update
    889     * the Active Priority Registers
    890     */
    891    uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp);
    892    int prio = cs->hppi.prio & mask;
    893    int aprbit = prio >> 1;
    894    int regno = aprbit / 32;
    895    int regbit = aprbit % 32;
    896
    897    cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit);
    898
    899    if (irq < GIC_INTERNAL) {
    900        cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
    901        cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0);
    902        gicv3_redist_update(cs);
    903    } else if (irq < GICV3_LPI_INTID_START) {
    904        gicv3_gicd_active_set(cs->gic, irq);
    905        gicv3_gicd_pending_clear(cs->gic, irq);
    906        gicv3_update(cs->gic, irq, 1);
    907    } else {
    908        gicv3_redist_lpi_pending(cs, irq, 0);
    909    }
    910}
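        /* The >> 1 here matches the << (GIC_MIN_BPR + 1) in
         * icc_highest_active_prio(): group priorities are always even, so
         * aprbit ranges over 0..127 across the four APR words per group.
         */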
    911
    912static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env)
    913{
    914    /* Return the highest priority pending interrupt register value
    915     * for group 0.
    916     */
    917    bool irq_is_secure;
    918
    919    if (cs->hppi.prio == 0xff) {
    920        return INTID_SPURIOUS;
    921    }
    922
    923    /* Check whether we can return the interrupt or if we should return
    924     * a special identifier, as per the CheckGroup0ForSpecialIdentifiers
    925     * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
    926     * is always zero.)
    927     */
    928    irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
    929                     (cs->hppi.grp != GICV3_G1NS));
    930
    931    if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) {
    932        return INTID_SPURIOUS;
    933    }
    934    if (irq_is_secure && !arm_is_secure(env)) {
    935        /* Secure interrupts not visible to Nonsecure */
    936        return INTID_SPURIOUS;
    937    }
    938
    939    if (cs->hppi.grp != GICV3_G0) {
    940        /* Indicate to EL3 that there's a Group 1 interrupt for the other
    941         * state pending.
    942         */
    943        return irq_is_secure ? INTID_SECURE : INTID_NONSECURE;
    944    }
    945
    946    return cs->hppi.irq;
    947}
    948
    949static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env)
    950{
    951    /* Return the highest priority pending interrupt register value
    952     * for group 1.
    953     */
    954    bool irq_is_secure;
    955
    956    if (cs->hppi.prio == 0xff) {
    957        return INTID_SPURIOUS;
    958    }
    959
    960    /* Check whether we can return the interrupt or if we should return
    961     * a special identifier, as per the CheckGroup1ForSpecialIdentifiers
    962     * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
    963     * is always zero.)
    964     */
    965    irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
    966                     (cs->hppi.grp != GICV3_G1NS));
    967
    968    if (cs->hppi.grp == GICV3_G0) {
    969        /* Group 0 interrupts not visible via HPPIR1 */
    970        return INTID_SPURIOUS;
    971    }
    972    if (irq_is_secure) {
    973        if (!arm_is_secure(env)) {
    974            /* Secure interrupts not visible in Non-secure */
    975            return INTID_SPURIOUS;
    976        }
    977    } else if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
    978        /* Group 1 non-secure interrupts not visible in Secure EL1 */
    979        return INTID_SPURIOUS;
    980    }
    981
    982    return cs->hppi.irq;
    983}
    984
    985static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
    986{
    987    GICv3CPUState *cs = icc_cs_from_env(env);
    988    uint64_t intid;
    989
    990    if (icv_access(env, HCR_FMO)) {
    991        return icv_iar_read(env, ri);
    992    }
    993
    994    if (!icc_hppi_can_preempt(cs)) {
    995        intid = INTID_SPURIOUS;
    996    } else {
    997        intid = icc_hppir0_value(cs, env);
    998    }
    999
   1000    if (!(intid >= INTID_SECURE && intid <= INTID_SPURIOUS)) {
   1001        icc_activate_irq(cs, intid);
   1002    }
   1003
   1004    trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid);
   1005    return intid;
   1006}
   1007
   1008static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1009{
   1010    GICv3CPUState *cs = icc_cs_from_env(env);
   1011    uint64_t intid;
   1012
   1013    if (icv_access(env, HCR_IMO)) {
   1014        return icv_iar_read(env, ri);
   1015    }
   1016
   1017    if (!icc_hppi_can_preempt(cs)) {
   1018        intid = INTID_SPURIOUS;
   1019    } else {
   1020        intid = icc_hppir1_value(cs, env);
   1021    }
   1022
   1023    if (!(intid >= INTID_SECURE && intid <= INTID_SPURIOUS)) {
   1024        icc_activate_irq(cs, intid);
   1025    }
   1026
   1027    trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid);
   1028    return intid;
   1029}
   1030
   1031static void icc_drop_prio(GICv3CPUState *cs, int grp)
   1032{
   1033    /* Drop the priority of the currently active interrupt in
   1034     * the specified group.
   1035     *
   1036     * Note that we can guarantee (because of the requirement to nest
   1037     * ICC_IAR reads [which activate an interrupt and raise priority]
   1038     * with ICC_EOIR writes [which drop the priority for the interrupt])
   1039     * that the interrupt we're being called for is the highest priority
   1040     * active interrupt, meaning that it has the lowest set bit in the
   1041     * APR registers.
   1042     *
   1043     * If the guest does not honour the ordering constraints then the
   1044     * behaviour of the GIC is UNPREDICTABLE, which for us means that
   1045     * the values of the APR registers might become incorrect and the
   1046     * running priority will be wrong, so interrupts that should preempt
   1047     * might not do so, and interrupts that should not preempt might do so.
   1048     */
   1049    int i;
   1050
   1051    for (i = 0; i < ARRAY_SIZE(cs->icc_apr[grp]); i++) {
   1052        uint64_t *papr = &cs->icc_apr[grp][i];
   1053
   1054        if (!*papr) {
   1055            continue;
   1056        }
   1057        /* Clear the lowest set bit */
   1058        *papr &= *papr - 1;
   1059        break;
   1060    }
   1061
   1062    /* running priority change means we need an update for this cpu i/f */
   1063    gicv3_cpuif_update(cs);
   1064}
   1065
   1066static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs)
   1067{
   1068    /* Return true if we should split priority drop and interrupt
   1069     * deactivation, ie whether the relevant EOIMode bit is set.
   1070     */
   1071    if (arm_is_el3_or_mon(env)) {
   1072        return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3;
   1073    }
   1074    if (arm_is_secure_below_el3(env)) {
   1075        return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE;
   1076    } else {
   1077        return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE;
   1078    }
   1079}
   1080
   1081static int icc_highest_active_group(GICv3CPUState *cs)
   1082{
   1083    /* Return the group with the highest priority active interrupt.
   1084     * We can do this by just comparing the APRs to see which one
   1085     * has the lowest set bit.
   1086     * (If more than one group is active at the same priority then
   1087     * we're in UNPREDICTABLE territory.)
   1088     */
   1089    int i;
   1090
   1091    for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
   1092        int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]);
   1093        int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]);
   1094        int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]);
   1095
   1096        if (g1nsctz < g0ctz && g1nsctz < g1ctz) {
   1097            return GICV3_G1NS;
   1098        }
   1099        if (g1ctz < g0ctz) {
   1100            return GICV3_G1;
   1101        }
   1102        if (g0ctz < 32) {
   1103            return GICV3_G0;
   1104        }
   1105    }
   1106    /* No set active bits? UNPREDICTABLE; return -1 so the caller
   1107     * ignores the spurious EOI attempt.
   1108     */
   1109    return -1;
   1110}
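        /* ctz32() of an all-zeroes word is 32, so an empty APR word never
         * wins; if two groups have their lowest active bit at the same index
         * (already UNPREDICTABLE territory) the ordering of the checks
         * favours G0, then G1, then G1NS.
         */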
   1111
   1112static void icc_deactivate_irq(GICv3CPUState *cs, int irq)
   1113{
   1114    if (irq < GIC_INTERNAL) {
   1115        cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0);
   1116        gicv3_redist_update(cs);
   1117    } else {
   1118        gicv3_gicd_active_clear(cs->gic, irq);
   1119        gicv3_update(cs->gic, irq, 1);
   1120    }
   1121}
   1122
   1123static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs)
   1124{
   1125    /* Return true if we should split priority drop and interrupt
   1126     * deactivation, ie whether the virtual EOIMode bit is set.
   1127     */
   1128    return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM;
   1129}
   1130
   1131static int icv_find_active(GICv3CPUState *cs, int irq)
   1132{
   1133    /* Given an interrupt number for an active interrupt, return the index
   1134     * of the corresponding list register, or -1 if there is no match.
   1135     * Corresponds to FindActiveVirtualInterrupt pseudocode.
   1136     */
   1137    int i;
   1138
   1139    for (i = 0; i < cs->num_list_regs; i++) {
   1140        uint64_t lr = cs->ich_lr_el2[i];
   1141
   1142        if ((lr & ICH_LR_EL2_STATE_ACTIVE_BIT) && ich_lr_vintid(lr) == irq) {
   1143            return i;
   1144        }
   1145    }
   1146
   1147    return -1;
   1148}
   1149
   1150static void icv_deactivate_irq(GICv3CPUState *cs, int idx)
   1151{
   1152    /* Deactivate the interrupt in the specified list register index */
   1153    uint64_t lr = cs->ich_lr_el2[idx];
   1154
   1155    if (lr & ICH_LR_EL2_HW) {
   1156        /* Deactivate the associated physical interrupt */
   1157        int pirq = ich_lr_pintid(lr);
   1158
   1159        if (pirq < INTID_SECURE) {
   1160            icc_deactivate_irq(cs, pirq);
   1161        }
   1162    }
   1163
   1164    /* Clear the 'active' part of the state, so ActivePending->Pending
   1165     * and Active->Invalid.
   1166     */
   1167    lr &= ~ICH_LR_EL2_STATE_ACTIVE_BIT;
   1168    cs->ich_lr_el2[idx] = lr;
   1169}
   1170
   1171static void icv_increment_eoicount(GICv3CPUState *cs)
   1172{
   1173    /* Increment the EOICOUNT field in ICH_HCR_EL2 */
   1174    int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
   1175                             ICH_HCR_EL2_EOICOUNT_LENGTH);
   1176
   1177    cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
   1178                                ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1);
   1179}
   1180
   1181static int icv_drop_prio(GICv3CPUState *cs)
   1182{
   1183    /* Drop the priority of the currently active virtual interrupt
   1184     * (favouring group 0 if there is a set active bit at
   1185     * the same priority for both group 0 and group 1).
   1186     * Return the priority value for the bit we just cleared,
   1187     * or 0xff if no bits were set in the AP registers at all.
   1188     * Note that though the ich_apr[] are uint64_t only the low
   1189     * 32 bits are actually relevant.
   1190     */
   1191    int i;
   1192    int aprmax = 1 << (cs->vprebits - 5);
   1193
   1194    assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
   1195
   1196    for (i = 0; i < aprmax; i++) {
   1197        uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i];
   1198        uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i];
   1199        int apr0count, apr1count;
   1200
   1201        if (!*papr0 && !*papr1) {
   1202            continue;
   1203        }
   1204
   1205        /* We can't just use the bit-twiddling hack icc_drop_prio() does
   1206         * because we need to return the bit number we cleared so
   1207         * it can be compared against the list register's priority field.
   1208         */
   1209        apr0count = ctz32(*papr0);
   1210        apr1count = ctz32(*papr1);
   1211
   1212        if (apr0count <= apr1count) {
   1213            *papr0 &= *papr0 - 1;
   1214            return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1);
   1215        } else {
   1216            *papr1 &= *papr1 - 1;
   1217            return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1);
   1218        }
   1219    }
   1220    return 0xff;
   1221}
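        /* The value returned is a group priority in the same form as
         * ich_lr_prio(lr) & icv_gprio_mask(), which is what icv_eoir_write()
         * compares it against.
         */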
   1222
   1223static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1224                          uint64_t value)
   1225{
   1226    /* Deactivate interrupt */
   1227    GICv3CPUState *cs = icc_cs_from_env(env);
   1228    int idx;
   1229    int irq = value & 0xffffff;
   1230
   1231    trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value);
   1232
   1233    if (irq >= GICV3_MAXIRQ) {
   1234        /* Also catches special interrupt numbers and LPIs */
   1235        return;
   1236    }
   1237
   1238    if (!icv_eoi_split(env, cs)) {
   1239        return;
   1240    }
   1241
   1242    idx = icv_find_active(cs, irq);
   1243
   1244    if (idx < 0) {
   1245        /* No list register matching this, so increment the EOI count
   1246         * (might trigger a maintenance interrupt)
   1247         */
   1248        icv_increment_eoicount(cs);
   1249    } else {
   1250        icv_deactivate_irq(cs, idx);
   1251    }
   1252
   1253    gicv3_cpuif_virt_update(cs);
   1254}
   1255
   1256static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1257                           uint64_t value)
   1258{
   1259    /* End of Interrupt */
   1260    GICv3CPUState *cs = icc_cs_from_env(env);
   1261    int irq = value & 0xffffff;
   1262    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
   1263    int idx, dropprio;
   1264
   1265    trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1,
   1266                               gicv3_redist_affid(cs), value);
   1267
   1268    if (irq >= GICV3_MAXIRQ) {
   1269        /* Also catches special interrupt numbers and LPIs */
   1270        return;
   1271    }
   1272
   1273    /* We implement the IMPDEF choice of "drop priority before doing
   1274     * error checks" (because that lets us avoid scanning the AP
   1275     * registers twice).
   1276     */
   1277    dropprio = icv_drop_prio(cs);
   1278    if (dropprio == 0xff) {
   1279        /* No active interrupt. It is CONSTRAINED UNPREDICTABLE
   1280         * whether the list registers are checked in this
   1281         * situation; we choose not to.
   1282         */
   1283        return;
   1284    }
   1285
   1286    idx = icv_find_active(cs, irq);
   1287
   1288    if (idx < 0) {
   1289        /* No valid list register corresponding to EOI ID */
   1290        icv_increment_eoicount(cs);
   1291    } else {
   1292        uint64_t lr = cs->ich_lr_el2[idx];
   1293        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
   1294        int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);
   1295
   1296        if (thisgrp == grp && lr_gprio == dropprio) {
   1297            if (!icv_eoi_split(env, cs)) {
   1298                /* Priority drop and deactivate not split: deactivate irq now */
   1299                icv_deactivate_irq(cs, idx);
   1300            }
   1301        }
   1302    }
   1303
   1304    gicv3_cpuif_virt_update(cs);
   1305}
   1306
   1307static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1308                           uint64_t value)
   1309{
   1310    /* End of Interrupt */
   1311    GICv3CPUState *cs = icc_cs_from_env(env);
   1312    int irq = value & 0xffffff;
   1313    int grp;
   1314    bool is_eoir0 = ri->crm == 8;
   1315
   1316    if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) {
   1317        icv_eoir_write(env, ri, value);
   1318        return;
   1319    }
   1320
   1321    trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1,
   1322                               gicv3_redist_affid(cs), value);
   1323
   1324    if ((irq >= cs->gic->num_irq) &&
   1325        !(cs->gic->lpi_enable && (irq >= GICV3_LPI_INTID_START))) {
   1326        /* This handles two cases:
   1327         * 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
   1328         * to the GICC_EOIR, the GIC ignores that write.
   1329         * 2. If software writes the number of a non-existent interrupt
   1330         * this must be a subcase of "value written does not match the last
   1331         * valid interrupt value read from the Interrupt Acknowledge
   1332         * register" and so this is UNPREDICTABLE. We choose to ignore it.
   1333         */
   1334        return;
   1335    }
   1336
   1337    grp = icc_highest_active_group(cs);
   1338    switch (grp) {
   1339    case GICV3_G0:
   1340        if (!is_eoir0) {
   1341            return;
   1342        }
   1343        if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS)
   1344            && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
   1345            return;
   1346        }
   1347        break;
   1348    case GICV3_G1:
   1349        if (is_eoir0) {
   1350            return;
   1351        }
   1352        if (!arm_is_secure(env)) {
   1353            return;
   1354        }
   1355        break;
   1356    case GICV3_G1NS:
   1357        if (is_eoir0) {
   1358            return;
   1359        }
   1360        if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
   1361            return;
   1362        }
   1363        break;
   1364    default:
   1365        qemu_log_mask(LOG_GUEST_ERROR,
   1366                      "%s: IRQ %d isn't active\n", __func__, irq);
   1367        return;
   1368    }
   1369
   1370    icc_drop_prio(cs, grp);
   1371
   1372    if (!icc_eoi_split(env, cs)) {
   1373        /* Priority drop and deactivate not split: deactivate irq now */
   1374        icc_deactivate_irq(cs, irq);
   1375    }
   1376}
   1377
   1378static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1379{
   1380    GICv3CPUState *cs = icc_cs_from_env(env);
   1381    uint64_t value;
   1382
   1383    if (icv_access(env, HCR_FMO)) {
   1384        return icv_hppir_read(env, ri);
   1385    }
   1386
   1387    value = icc_hppir0_value(cs, env);
   1388    trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
   1389    return value;
   1390}
   1391
   1392static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1393{
   1394    GICv3CPUState *cs = icc_cs_from_env(env);
   1395    uint64_t value;
   1396
   1397    if (icv_access(env, HCR_IMO)) {
   1398        return icv_hppir_read(env, ri);
   1399    }
   1400
   1401    value = icc_hppir1_value(cs, env);
   1402    trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
   1403    return value;
   1404}
   1405
   1406static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1407{
   1408    GICv3CPUState *cs = icc_cs_from_env(env);
   1409    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
   1410    bool satinc = false;
   1411    uint64_t bpr;
   1412
   1413    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
   1414        return icv_bpr_read(env, ri);
   1415    }
   1416
   1417    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
   1418        grp = GICV3_G1NS;
   1419    }
   1420
   1421    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
   1422        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
   1423        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
   1424         * modify BPR0
   1425         */
   1426        grp = GICV3_G0;
   1427    }
   1428
   1429    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
   1430        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
   1431        /* reads return bpr0 + 1 sat to 7, writes ignored */
   1432        grp = GICV3_G0;
   1433        satinc = true;
   1434    }
   1435
   1436    bpr = cs->icc_bpr[grp];
   1437    if (satinc) {
   1438        bpr++;
   1439        bpr = MIN(bpr, 7);
   1440    }
   1441
   1442    trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
   1443
   1444    return bpr;
   1445}
   1446
   1447static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1448                          uint64_t value)
   1449{
   1450    GICv3CPUState *cs = icc_cs_from_env(env);
   1451    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
   1452    uint64_t minval;
   1453
   1454    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
   1455        icv_bpr_write(env, ri, value);
   1456        return;
   1457    }
   1458
   1459    trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
   1460                              gicv3_redist_affid(cs), value);
   1461
   1462    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
   1463        grp = GICV3_G1NS;
   1464    }
   1465
   1466    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
   1467        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
   1468        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
   1469         * modify BPR0
   1470         */
   1471        grp = GICV3_G0;
   1472    }
   1473
   1474    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
   1475        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
   1476        /* reads return bpr0 + 1 sat to 7, writes ignored */
   1477        return;
   1478    }
   1479
   1480    minval = (grp == GICV3_G1NS) ? GIC_MIN_BPR_NS : GIC_MIN_BPR;
   1481    if (value < minval) {
   1482        value = minval;
   1483    }
   1484
   1485    cs->icc_bpr[grp] = value & 7;
   1486    gicv3_cpuif_update(cs);
   1487}
   1488
   1489static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1490{
   1491    GICv3CPUState *cs = icc_cs_from_env(env);
   1492    uint64_t value;
   1493
   1494    int regno = ri->opc2 & 3;
   1495    int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
   1496
   1497    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
   1498        return icv_ap_read(env, ri);
   1499    }
   1500
   1501    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
   1502        grp = GICV3_G1NS;
   1503    }
   1504
   1505    value = cs->icc_apr[grp][regno];
   1506
   1507    trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
   1508    return value;
   1509}
   1510
   1511static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1512                         uint64_t value)
   1513{
   1514    GICv3CPUState *cs = icc_cs_from_env(env);
   1515
   1516    int regno = ri->opc2 & 3;
   1517    int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
   1518
   1519    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
   1520        icv_ap_write(env, ri, value);
   1521        return;
   1522    }
   1523
   1524    trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
   1525
   1526    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
   1527        grp = GICV3_G1NS;
   1528    }
   1529
   1530    /* It's not possible to claim that a Non-secure interrupt is active
   1531     * at a priority outside the Non-secure range (128..255), since this
   1532     * would otherwise allow malicious NS code to block delivery of S interrupts
   1533     * by writing a bad value to these registers.
   1534     */
   1535    if (grp == GICV3_G1NS && regno < 2 && arm_feature(env, ARM_FEATURE_EL3)) {
   1536        return;
   1537    }
   1538
   1539    cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
   1540    gicv3_cpuif_update(cs);
   1541}
   1542
   1543static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1544                          uint64_t value)
   1545{
   1546    /* Deactivate interrupt */
   1547    GICv3CPUState *cs = icc_cs_from_env(env);
   1548    int irq = value & 0xffffff;
   1549    bool irq_is_secure, single_sec_state, irq_is_grp0;
   1550    bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2;
   1551
   1552    if (icv_access(env, HCR_FMO | HCR_IMO)) {
   1553        icv_dir_write(env, ri, value);
   1554        return;
   1555    }
   1556
   1557    trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);
   1558
   1559    if (irq >= cs->gic->num_irq) {
   1560        /* Also catches special interrupt numbers and LPIs */
   1561        return;
   1562    }
   1563
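           /* A DIR write only deactivates an interrupt when priority drop and
            * deactivation are split (EOImode set); otherwise we ignore it.
            */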
   1564    if (!icc_eoi_split(env, cs)) {
   1565        return;
   1566    }
   1567
   1568    int grp = gicv3_irq_group(cs->gic, cs, irq);
   1569
   1570    single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS;
   1571    irq_is_secure = !single_sec_state && (grp != GICV3_G1NS);
   1572    irq_is_grp0 = grp == GICV3_G0;
   1573
   1574    /* Check whether we're allowed to deactivate this interrupt based
   1575     * on its group and the current CPU state.
   1576     * These checks are laid out to correspond to the spec's pseudocode.
   1577     */
   1578    route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ;
   1579    route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ;
   1580    /* No need to include !IsSecure in route_*_to_el2 as it's only
   1581     * tested in cases where we know !IsSecure is true.
   1582     */
   1583    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
   1584    route_fiq_to_el2 = hcr_el2 & HCR_FMO;
   1585    route_irq_to_el2 = hcr_el2 & HCR_IMO;
   1586
   1587    switch (arm_current_el(env)) {
   1588    case 3:
   1589        break;
   1590    case 2:
   1591        if (single_sec_state && irq_is_grp0 && !route_fiq_to_el3) {
   1592            break;
   1593        }
   1594        if (!irq_is_secure && !irq_is_grp0 && !route_irq_to_el3) {
   1595            break;
   1596        }
   1597        return;
   1598    case 1:
   1599        if (!arm_is_secure_below_el3(env)) {
   1600            if (single_sec_state && irq_is_grp0 &&
   1601                !route_fiq_to_el3 && !route_fiq_to_el2) {
   1602                break;
   1603            }
   1604            if (!irq_is_secure && !irq_is_grp0 &&
   1605                !route_irq_to_el3 && !route_irq_to_el2) {
   1606                break;
   1607            }
   1608        } else {
   1609            if (irq_is_grp0 && !route_fiq_to_el3) {
   1610                break;
   1611            }
   1612            if (!irq_is_grp0 &&
   1613                (!irq_is_secure || !single_sec_state) &&
   1614                !route_irq_to_el3) {
   1615                break;
   1616            }
   1617        }
   1618        return;
   1619    default:
   1620        g_assert_not_reached();
   1621    }
   1622
   1623    icc_deactivate_irq(cs, irq);
   1624}
   1625
   1626static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1627{
   1628    GICv3CPUState *cs = icc_cs_from_env(env);
   1629    int prio;
   1630
   1631    if (icv_access(env, HCR_FMO | HCR_IMO)) {
   1632        return icv_rpr_read(env, ri);
   1633    }
   1634
   1635    prio = icc_highest_active_prio(cs);
   1636
   1637    if (arm_feature(env, ARM_FEATURE_EL3) &&
   1638        !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
   1639        /* NS GIC access and Group 0 is inaccessible to NS */
   1640        if ((prio & 0x80) == 0) {
   1641            /* NS mustn't see priorities in the Secure half of the range */
   1642            prio = 0;
   1643        } else if (prio != 0xff) {
   1644            /* Non-idle priority: show the Non-secure view of it */
   1645            prio = (prio << 1) & 0xff;
   1646        }
   1647    }
   1648
   1649    trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio);
   1650    return prio;
   1651}
   1652
   1653static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs,
   1654                             uint64_t value, int grp, bool ns)
   1655{
   1656    GICv3State *s = cs->gic;
   1657
   1658    /* Extract Aff3/Aff2/Aff1 and shift into the bottom 24 bits */
   1659    uint64_t aff = extract64(value, 48, 8) << 16 |
   1660        extract64(value, 32, 8) << 8 |
   1661        extract64(value, 16, 8);
   1662    uint32_t targetlist = extract64(value, 0, 16);
   1663    uint32_t irq = extract64(value, 24, 4);
   1664    bool irm = extract64(value, 40, 1);
   1665    int i;
   1666
   1667    if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) {
   1668        /* If GICD_CTLR.DS == 1, the Distributor treats Secure Group 1
   1669         * interrupts as Group 0 interrupts and must send Secure Group 0
   1670         * interrupts to the target CPUs.
   1671         */
   1672        grp = GICV3_G0;
   1673    }
   1674
   1675    trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm,
   1676                                 aff, targetlist);
   1677
   1678    for (i = 0; i < s->num_cpu; i++) {
   1679        GICv3CPUState *ocs = &s->cpu[i];
   1680
   1681        if (irm) {
   1682            /* IRM == 1 : route to all CPUs except self */
   1683            if (cs == ocs) {
   1684                continue;
   1685            }
   1686        } else {
   1687            /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15]
   1688             * where the corresponding bit is set in targetlist
   1689             */
   1690            int aff0;
   1691
   1692            if (ocs->gicr_typer >> 40 != aff) {
   1693                continue;
   1694            }
   1695            aff0 = extract64(ocs->gicr_typer, 32, 8);
   1696            if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) {
   1697                continue;
   1698            }
   1699        }
   1700
   1701        /* The redistributor will check against its own GICR_NSACR as needed */
   1702        gicv3_redist_send_sgi(ocs, grp, irq, ns);
   1703    }
   1704}
   1705
   1706static void icc_sgi0r_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1707                           uint64_t value)
   1708{
   1709    /* Generate Secure Group 0 SGI. */
   1710    GICv3CPUState *cs = icc_cs_from_env(env);
   1711    bool ns = !arm_is_secure(env);
   1712
   1713    icc_generate_sgi(env, cs, value, GICV3_G0, ns);
   1714}
   1715
   1716static void icc_sgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1717                           uint64_t value)
   1718{
   1719    /* Generate Group 1 SGI for the current Security state */
   1720    GICv3CPUState *cs = icc_cs_from_env(env);
   1721    int grp;
   1722    bool ns = !arm_is_secure(env);
   1723
   1724    grp = ns ? GICV3_G1NS : GICV3_G1;
   1725    icc_generate_sgi(env, cs, value, grp, ns);
   1726}
   1727
   1728static void icc_asgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1729                             uint64_t value)
   1730{
   1731    /* Generate Group 1 SGI for the Security state that is not
   1732     * the current state
   1733     */
   1734    GICv3CPUState *cs = icc_cs_from_env(env);
   1735    int grp;
   1736    bool ns = !arm_is_secure(env);
   1737
   1738    grp = ns ? GICV3_G1 : GICV3_G1NS;
   1739    icc_generate_sgi(env, cs, value, grp, ns);
   1740}
   1741
   1742static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1743{
   1744    GICv3CPUState *cs = icc_cs_from_env(env);
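           /* ICC_IGRPEN0_EL1 is encoded with opc2 == 6 and ICC_IGRPEN1_EL1
            * with opc2 == 7, so bit 0 of opc2 selects the group.
            */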
   1745    int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
   1746    uint64_t value;
   1747
   1748    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
   1749        return icv_igrpen_read(env, ri);
   1750    }
   1751
   1752    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
   1753        grp = GICV3_G1NS;
   1754    }
   1755
   1756    value = cs->icc_igrpen[grp];
   1757    trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0,
   1758                                gicv3_redist_affid(cs), value);
   1759    return value;
   1760}
   1761
   1762static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1763                             uint64_t value)
   1764{
   1765    GICv3CPUState *cs = icc_cs_from_env(env);
   1766    int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
   1767
   1768    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
   1769        icv_igrpen_write(env, ri, value);
   1770        return;
   1771    }
   1772
   1773    trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0,
   1774                                 gicv3_redist_affid(cs), value);
   1775
   1776    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
   1777        grp = GICV3_G1NS;
   1778    }
   1779
   1780    cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE;
   1781    gicv3_cpuif_update(cs);
   1782}
   1783
   1784static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1785{
   1786    GICv3CPUState *cs = icc_cs_from_env(env);
   1787    uint64_t value;
   1788
   1789    /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
   1790    value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
   1791    trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value);
   1792    return value;
   1793}
   1794
   1795static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1796                                  uint64_t value)
   1797{
   1798    GICv3CPUState *cs = icc_cs_from_env(env);
   1799
   1800    trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value);
   1801
   1802    /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
   1803    cs->icc_igrpen[GICV3_G1NS] = extract32(value, 0, 1);
   1804    cs->icc_igrpen[GICV3_G1] = extract32(value, 1, 1);
   1805    gicv3_cpuif_update(cs);
   1806}
   1807
   1808static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1809{
   1810    GICv3CPUState *cs = icc_cs_from_env(env);
   1811    int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
   1812    uint64_t value;
   1813
   1814    if (icv_access(env, HCR_FMO | HCR_IMO)) {
   1815        return icv_ctlr_read(env, ri);
   1816    }
   1817
   1818    value = cs->icc_ctlr_el1[bank];
   1819    trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
   1820    return value;
   1821}
   1822
   1823static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1824                               uint64_t value)
   1825{
   1826    GICv3CPUState *cs = icc_cs_from_env(env);
   1827    int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
   1828    uint64_t mask;
   1829
   1830    if (icv_access(env, HCR_FMO | HCR_IMO)) {
   1831        icv_ctlr_write(env, ri, value);
   1832        return;
   1833    }
   1834
   1835    trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value);
   1836
   1837    /* Only CBPR and EOIMODE can be RW;
   1838     * for us PMHE is RAZ/WI (we don't implement 1-of-N interrupts or
   1839     * the associated priority-based routing of them);
   1840     * if EL3 is implemented and GICD_CTLR.DS == 0, then PMHE and CBPR are RO.
   1841     */
   1842    if (arm_feature(env, ARM_FEATURE_EL3) &&
   1843        ((cs->gic->gicd_ctlr & GICD_CTLR_DS) == 0)) {
   1844        mask = ICC_CTLR_EL1_EOIMODE;
   1845    } else {
   1846        mask = ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE;
   1847    }
   1848
   1849    cs->icc_ctlr_el1[bank] &= ~mask;
   1850    cs->icc_ctlr_el1[bank] |= (value & mask);
   1851    gicv3_cpuif_update(cs);
   1852}
   1853
   1854
   1855static uint64_t icc_ctlr_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1856{
   1857    GICv3CPUState *cs = icc_cs_from_env(env);
   1858    uint64_t value;
   1859
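           /* The *_EL1NS and *_EL1S bits read back as aliases of the CBPR and
            * EOIMODE bits in the NS and S banks of ICC_CTLR_EL1, mirroring
            * icc_ctlr_el3_write() below.
            */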
   1860    value = cs->icc_ctlr_el3;
   1861    if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
   1862        value |= ICC_CTLR_EL3_EOIMODE_EL1NS;
   1863    }
   1864    if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
   1865        value |= ICC_CTLR_EL3_CBPR_EL1NS;
   1866    }
   1867    if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE) {
   1868        value |= ICC_CTLR_EL3_EOIMODE_EL1S;
   1869    }
   1870    if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) {
   1871        value |= ICC_CTLR_EL3_CBPR_EL1S;
   1872    }
   1873
   1874    trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value);
   1875    return value;
   1876}
   1877
   1878static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1879                               uint64_t value)
   1880{
   1881    GICv3CPUState *cs = icc_cs_from_env(env);
   1882    uint64_t mask;
   1883
   1884    trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value);
   1885
   1886    /* *_EL1NS and *_EL1S bits are aliases into the ICC_CTLR_EL1 bits. */
   1887    cs->icc_ctlr_el1[GICV3_NS] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
   1888    if (value & ICC_CTLR_EL3_EOIMODE_EL1NS) {
   1889        cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_EOIMODE;
   1890    }
   1891    if (value & ICC_CTLR_EL3_CBPR_EL1NS) {
   1892        cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_CBPR;
   1893    }
   1894
   1895    cs->icc_ctlr_el1[GICV3_S] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
   1896    if (value & ICC_CTLR_EL3_EOIMODE_EL1S) {
   1897        cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_EOIMODE;
   1898    }
   1899    if (value & ICC_CTLR_EL3_CBPR_EL1S) {
   1900        cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR;
   1901    }
   1902
   1903    /* The only bit stored in icc_ctlr_el3 which is writeable is EOIMODE_EL3: */
   1904    mask = ICC_CTLR_EL3_EOIMODE_EL3;
   1905
   1906    cs->icc_ctlr_el3 &= ~mask;
   1907    cs->icc_ctlr_el3 |= (value & mask);
   1908    gicv3_cpuif_update(cs);
   1909}
   1910
   1911static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
   1912                                          const ARMCPRegInfo *ri, bool isread)
   1913{
   1914    CPAccessResult r = CP_ACCESS_OK;
   1915    GICv3CPUState *cs = icc_cs_from_env(env);
   1916    int el = arm_current_el(env);
   1917
   1918    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TC) &&
   1919        el == 1 && !arm_is_secure_below_el3(env)) {
   1920        /* Takes priority over a possible EL3 trap */
   1921        return CP_ACCESS_TRAP_EL2;
   1922    }
   1923
   1924    if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
   1925        switch (el) {
   1926        case 1:
   1927            /* Note that arm_hcr_el2_eff takes secure state into account.  */
   1928            if ((arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) == 0) {
   1929                r = CP_ACCESS_TRAP_EL3;
   1930            }
   1931            break;
   1932        case 2:
   1933            r = CP_ACCESS_TRAP_EL3;
   1934            break;
   1935        case 3:
   1936            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
   1937                r = CP_ACCESS_TRAP_EL3;
   1938            }
   1939            break;
   1940        default:
   1941            g_assert_not_reached();
   1942        }
   1943    }
   1944
   1945    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
   1946        r = CP_ACCESS_TRAP;
   1947    }
   1948    return r;
   1949}
   1950
   1951static CPAccessResult gicv3_dir_access(CPUARMState *env,
   1952                                       const ARMCPRegInfo *ri, bool isread)
   1953{
   1954    GICv3CPUState *cs = icc_cs_from_env(env);
   1955
   1956    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TDIR) &&
   1957        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
   1958        /* Takes priority over a possible EL3 trap */
   1959        return CP_ACCESS_TRAP_EL2;
   1960    }
   1961
   1962    return gicv3_irqfiq_access(env, ri, isread);
   1963}
   1964
   1965static CPAccessResult gicv3_sgi_access(CPUARMState *env,
   1966                                       const ARMCPRegInfo *ri, bool isread)
   1967{
   1968    if (arm_current_el(env) == 1 &&
   1969        (arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) != 0) {
   1970        /* Takes priority over a possible EL3 trap */
   1971        return CP_ACCESS_TRAP_EL2;
   1972    }
   1973
   1974    return gicv3_irqfiq_access(env, ri, isread);
   1975}
   1976
   1977static CPAccessResult gicv3_fiq_access(CPUARMState *env,
   1978                                       const ARMCPRegInfo *ri, bool isread)
   1979{
   1980    CPAccessResult r = CP_ACCESS_OK;
   1981    GICv3CPUState *cs = icc_cs_from_env(env);
   1982    int el = arm_current_el(env);
   1983
   1984    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL0) &&
   1985        el == 1 && !arm_is_secure_below_el3(env)) {
   1986        /* Takes priority over a possible EL3 trap */
   1987        return CP_ACCESS_TRAP_EL2;
   1988    }
   1989
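           /* If SCR_EL3.FIQ is set then Group 0 accesses may need to be
            * routed to EL3: at EL1 only when the effective HCR_EL2.FMO is
            * clear (otherwise the ICV_ registers are used), always at EL2,
            * and at EL3 only for AArch32 non-Monitor modes.
            */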
   1990    if (env->cp15.scr_el3 & SCR_FIQ) {
   1991        switch (el) {
   1992        case 1:
   1993            if ((arm_hcr_el2_eff(env) & HCR_FMO) == 0) {
   1994                r = CP_ACCESS_TRAP_EL3;
   1995            }
   1996            break;
   1997        case 2:
   1998            r = CP_ACCESS_TRAP_EL3;
   1999            break;
   2000        case 3:
   2001            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
   2002                r = CP_ACCESS_TRAP_EL3;
   2003            }
   2004            break;
   2005        default:
   2006            g_assert_not_reached();
   2007        }
   2008    }
   2009
   2010    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
   2011        r = CP_ACCESS_TRAP;
   2012    }
   2013    return r;
   2014}
   2015
   2016static CPAccessResult gicv3_irq_access(CPUARMState *env,
   2017                                       const ARMCPRegInfo *ri, bool isread)
   2018{
   2019    CPAccessResult r = CP_ACCESS_OK;
   2020    GICv3CPUState *cs = icc_cs_from_env(env);
   2021    int el = arm_current_el(env);
   2022
   2023    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL1) &&
   2024        el == 1 && !arm_is_secure_below_el3(env)) {
   2025        /* Takes priority over a possible EL3 trap */
   2026        return CP_ACCESS_TRAP_EL2;
   2027    }
   2028
   2029    if (env->cp15.scr_el3 & SCR_IRQ) {
   2030        switch (el) {
   2031        case 1:
   2032            if ((arm_hcr_el2_eff(env) & HCR_IMO) == 0) {
   2033                r = CP_ACCESS_TRAP_EL3;
   2034            }
   2035            break;
   2036        case 2:
   2037            r = CP_ACCESS_TRAP_EL3;
   2038            break;
   2039        case 3:
   2040            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
   2041                r = CP_ACCESS_TRAP_EL3;
   2042            }
   2043            break;
   2044        default:
   2045            g_assert_not_reached();
   2046        }
   2047    }
   2048
   2049    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
   2050        r = CP_ACCESS_TRAP;
   2051    }
   2052    return r;
   2053}
   2054
   2055static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
   2056{
   2057    GICv3CPUState *cs = icc_cs_from_env(env);
   2058
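           /* Reset both banks of ICC_CTLR_EL1: A3V set, IDbits == 1 (24-bit
            * INTIDs) and a PRIbits field of 7, i.e. 8 bits of priority.
            */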
   2059    cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
   2060        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
   2061        (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
   2062    cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
   2063        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
   2064        (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
   2065    cs->icc_pmr_el1 = 0;
   2066    cs->icc_bpr[GICV3_G0] = GIC_MIN_BPR;
   2067    cs->icc_bpr[GICV3_G1] = GIC_MIN_BPR;
   2068    cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR_NS;
   2069    memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
   2070    memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
   2071    cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
   2072        (1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
   2073        (7 << ICC_CTLR_EL3_PRIBITS_SHIFT);
   2074
   2075    memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
   2076    cs->ich_hcr_el2 = 0;
   2077    memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
   2078    cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
   2079        ((icv_min_vbpr(cs) + 1) << ICH_VMCR_EL2_VBPR1_SHIFT) |
   2080        (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
   2081}
   2082
   2083static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
   2084    { .name = "ICC_PMR_EL1", .state = ARM_CP_STATE_BOTH,
   2085      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0,
   2086      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2087      .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
   2088      .readfn = icc_pmr_read,
   2089      .writefn = icc_pmr_write,
   2090      /* We hang the whole cpu interface reset routine off here
   2091       * rather than parcelling it out into one little function
   2092       * per register
   2093       */
   2094      .resetfn = icc_reset,
   2095    },
   2096    { .name = "ICC_IAR0_EL1", .state = ARM_CP_STATE_BOTH,
   2097      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0,
   2098      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2099      .access = PL1_R, .accessfn = gicv3_fiq_access,
   2100      .readfn = icc_iar0_read,
   2101    },
   2102    { .name = "ICC_EOIR0_EL1", .state = ARM_CP_STATE_BOTH,
   2103      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1,
   2104      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2105      .access = PL1_W, .accessfn = gicv3_fiq_access,
   2106      .writefn = icc_eoir_write,
   2107    },
   2108    { .name = "ICC_HPPIR0_EL1", .state = ARM_CP_STATE_BOTH,
   2109      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2,
   2110      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2111      .access = PL1_R, .accessfn = gicv3_fiq_access,
   2112      .readfn = icc_hppir0_read,
   2113    },
   2114    { .name = "ICC_BPR0_EL1", .state = ARM_CP_STATE_BOTH,
   2115      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
   2116      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2117      .access = PL1_RW, .accessfn = gicv3_fiq_access,
   2118      .readfn = icc_bpr_read,
   2119      .writefn = icc_bpr_write,
   2120    },
   2121    { .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH,
   2122      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
   2123      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2124      .access = PL1_RW, .accessfn = gicv3_fiq_access,
   2125      .readfn = icc_ap_read,
   2126      .writefn = icc_ap_write,
   2127    },
   2128    { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
   2129      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
   2130      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2131      .access = PL1_RW, .accessfn = gicv3_fiq_access,
   2132      .readfn = icc_ap_read,
   2133      .writefn = icc_ap_write,
   2134    },
   2135    { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
   2136      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
   2137      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2138      .access = PL1_RW, .accessfn = gicv3_fiq_access,
   2139      .readfn = icc_ap_read,
   2140      .writefn = icc_ap_write,
   2141    },
   2142    { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
   2143      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
   2144      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2145      .access = PL1_RW, .accessfn = gicv3_fiq_access,
   2146      .readfn = icc_ap_read,
   2147      .writefn = icc_ap_write,
   2148    },
   2149    /* All the ICC_AP1R*_EL1 registers are banked */
   2150    { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH,
   2151      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0,
   2152      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2153      .access = PL1_RW, .accessfn = gicv3_irq_access,
   2154      .readfn = icc_ap_read,
   2155      .writefn = icc_ap_write,
   2156    },
   2157    { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
   2158      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
   2159      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2160      .access = PL1_RW, .accessfn = gicv3_irq_access,
   2161      .readfn = icc_ap_read,
   2162      .writefn = icc_ap_write,
   2163    },
   2164    { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
   2165      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
   2166      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2167      .access = PL1_RW, .accessfn = gicv3_irq_access,
   2168      .readfn = icc_ap_read,
   2169      .writefn = icc_ap_write,
   2170    },
   2171    { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
   2172      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
   2173      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2174      .access = PL1_RW, .accessfn = gicv3_irq_access,
   2175      .readfn = icc_ap_read,
   2176      .writefn = icc_ap_write,
   2177    },
   2178    { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
   2179      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
   2180      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2181      .access = PL1_W, .accessfn = gicv3_dir_access,
   2182      .writefn = icc_dir_write,
   2183    },
   2184    { .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH,
   2185      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3,
   2186      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2187      .access = PL1_R, .accessfn = gicv3_irqfiq_access,
   2188      .readfn = icc_rpr_read,
   2189    },
   2190    { .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64,
   2191      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5,
   2192      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2193      .access = PL1_W, .accessfn = gicv3_sgi_access,
   2194      .writefn = icc_sgi1r_write,
   2195    },
   2196    { .name = "ICC_SGI1R",
   2197      .cp = 15, .opc1 = 0, .crm = 12,
   2198      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
   2199      .access = PL1_W, .accessfn = gicv3_sgi_access,
   2200      .writefn = icc_sgi1r_write,
   2201    },
   2202    { .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64,
   2203      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6,
   2204      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2205      .access = PL1_W, .accessfn = gicv3_sgi_access,
   2206      .writefn = icc_asgi1r_write,
   2207    },
   2208    { .name = "ICC_ASGI1R",
   2209      .cp = 15, .opc1 = 1, .crm = 12,
   2210      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
   2211      .access = PL1_W, .accessfn = gicv3_sgi_access,
   2212      .writefn = icc_asgi1r_write,
   2213    },
   2214    { .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64,
   2215      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7,
   2216      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2217      .access = PL1_W, .accessfn = gicv3_sgi_access,
   2218      .writefn = icc_sgi0r_write,
   2219    },
   2220    { .name = "ICC_SGI0R",
   2221      .cp = 15, .opc1 = 2, .crm = 12,
   2222      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
   2223      .access = PL1_W, .accessfn = gicv3_sgi_access,
   2224      .writefn = icc_sgi0r_write,
   2225    },
   2226    { .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH,
   2227      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0,
   2228      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2229      .access = PL1_R, .accessfn = gicv3_irq_access,
   2230      .readfn = icc_iar1_read,
   2231    },
   2232    { .name = "ICC_EOIR1_EL1", .state = ARM_CP_STATE_BOTH,
   2233      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1,
   2234      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2235      .access = PL1_W, .accessfn = gicv3_irq_access,
   2236      .writefn = icc_eoir_write,
   2237    },
   2238    { .name = "ICC_HPPIR1_EL1", .state = ARM_CP_STATE_BOTH,
   2239      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2,
   2240      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2241      .access = PL1_R, .accessfn = gicv3_irq_access,
   2242      .readfn = icc_hppir1_read,
   2243    },
   2244    /* This register is banked */
   2245    { .name = "ICC_BPR1_EL1", .state = ARM_CP_STATE_BOTH,
   2246      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3,
   2247      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2248      .access = PL1_RW, .accessfn = gicv3_irq_access,
   2249      .readfn = icc_bpr_read,
   2250      .writefn = icc_bpr_write,
   2251    },
   2252    /* This register is banked */
   2253    { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
   2254      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
   2255      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2256      .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
   2257      .readfn = icc_ctlr_el1_read,
   2258      .writefn = icc_ctlr_el1_write,
   2259    },
   2260    { .name = "ICC_SRE_EL1", .state = ARM_CP_STATE_BOTH,
   2261      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5,
   2262      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
   2263      .access = PL1_RW,
   2264      /* We don't support IRQ/FIQ bypass and system registers are
   2265       * always enabled, so all our bits are RAZ/WI or RAO/WI.
   2266       * This register is banked but since it's constant we don't
   2267       * need to do anything special.
   2268       */
   2269      .resetvalue = 0x7,
   2270    },
   2271    { .name = "ICC_IGRPEN0_EL1", .state = ARM_CP_STATE_BOTH,
   2272      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6,
   2273      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2274      .access = PL1_RW, .accessfn = gicv3_fiq_access,
   2275      .readfn = icc_igrpen_read,
   2276      .writefn = icc_igrpen_write,
   2277    },
   2278    /* This register is banked */
   2279    { .name = "ICC_IGRPEN1_EL1", .state = ARM_CP_STATE_BOTH,
   2280      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7,
   2281      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2282      .access = PL1_RW, .accessfn = gicv3_irq_access,
   2283      .readfn = icc_igrpen_read,
   2284      .writefn = icc_igrpen_write,
   2285    },
   2286    { .name = "ICC_SRE_EL2", .state = ARM_CP_STATE_BOTH,
   2287      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 5,
   2288      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
   2289      .access = PL2_RW,
   2290      /* We don't support IRQ/FIQ bypass and system registers are
   2291       * always enabled, so all our bits are RAZ/WI or RAO/WI.
   2292       */
   2293      .resetvalue = 0xf,
   2294    },
   2295    { .name = "ICC_CTLR_EL3", .state = ARM_CP_STATE_BOTH,
   2296      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4,
   2297      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2298      .access = PL3_RW,
   2299      .readfn = icc_ctlr_el3_read,
   2300      .writefn = icc_ctlr_el3_write,
   2301    },
   2302    { .name = "ICC_SRE_EL3", .state = ARM_CP_STATE_BOTH,
   2303      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 5,
   2304      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
   2305      .access = PL3_RW,
   2306      /* We don't support IRQ/FIQ bypass and system registers are
   2307       * always enabled, so all our bits are RAZ/WI or RAO/WI.
   2308       */
   2309      .resetvalue = 0xf,
   2310    },
   2311    { .name = "ICC_IGRPEN1_EL3", .state = ARM_CP_STATE_BOTH,
   2312      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 7,
   2313      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2314      .access = PL3_RW,
   2315      .readfn = icc_igrpen1_el3_read,
   2316      .writefn = icc_igrpen1_el3_write,
   2317    },
   2318    REGINFO_SENTINEL
   2319};
   2320
   2321static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2322{
   2323    GICv3CPUState *cs = icc_cs_from_env(env);
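           /* crm 8 encodes ICH_AP0Rn_EL2 (Group 0) and crm 9 ICH_AP1Rn_EL2
            * (Group 1); the low two bits of opc2 give the index n.
            */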
   2324    int regno = ri->opc2 & 3;
   2325    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
   2326    uint64_t value;
   2327
   2328    value = cs->ich_apr[grp][regno];
   2329    trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
   2330    return value;
   2331}
   2332
   2333static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2334                         uint64_t value)
   2335{
   2336    GICv3CPUState *cs = icc_cs_from_env(env);
   2337    int regno = ri->opc2 & 3;
   2338    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
   2339
   2340    trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
   2341
   2342    cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
   2343    gicv3_cpuif_virt_update(cs);
   2344}
   2345
   2346static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2347{
   2348    GICv3CPUState *cs = icc_cs_from_env(env);
   2349    uint64_t value = cs->ich_hcr_el2;
   2350
   2351    trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value);
   2352    return value;
   2353}
   2354
   2355static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2356                          uint64_t value)
   2357{
   2358    GICv3CPUState *cs = icc_cs_from_env(env);
   2359
   2360    trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value);
   2361
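           /* Keep only the bits we implement as writable; the rest are RES0
            * in this implementation.
            */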
   2362    value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE |
   2363        ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE |
   2364        ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC |
   2365        ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI |
   2366        ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK;
   2367
   2368    cs->ich_hcr_el2 = value;
   2369    gicv3_cpuif_virt_update(cs);
   2370}
   2371
   2372static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2373{
   2374    GICv3CPUState *cs = icc_cs_from_env(env);
   2375    uint64_t value = cs->ich_vmcr_el2;
   2376
   2377    trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value);
   2378    return value;
   2379}
   2380
   2381static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2382                         uint64_t value)
   2383{
   2384    GICv3CPUState *cs = icc_cs_from_env(env);
   2385
   2386    trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value);
   2387
   2388    value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR |
   2389        ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK |
   2390        ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK;
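           /* VFIQEn is RES1 for us: virtual Group 0 interrupts are always
            * signalled as FIQ.
            */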
   2391    value |= ICH_VMCR_EL2_VFIQEN;
   2392
   2393    cs->ich_vmcr_el2 = value;
   2394    /* Enforce "writing BPRs to less than minimum sets them to the minimum"
   2395     * by reading and writing back the fields.
   2396     */
   2397    write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0));
   2398    write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1));
   2399
   2400    gicv3_cpuif_virt_update(cs);
   2401}
   2402
   2403static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2404{
   2405    GICv3CPUState *cs = icc_cs_from_env(env);
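           /* Recover the list register index: opc2 gives bits [2:0] and the
            * low bit of crm (12/13 for LR, 14/15 for LRC) gives bit 3.
            */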
   2406    int regno = ri->opc2 | ((ri->crm & 1) << 3);
   2407    uint64_t value;
   2408
   2409    /* This read function handles all of:
   2410     * 64-bit reads of the whole LR
   2411     * 32-bit reads of the low half of the LR
   2412     * 32-bit reads of the high half of the LR
   2413     */
   2414    if (ri->state == ARM_CP_STATE_AA32) {
   2415        if (ri->crm >= 14) {
   2416            value = extract64(cs->ich_lr_el2[regno], 32, 32);
   2417            trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value);
   2418        } else {
   2419            value = extract64(cs->ich_lr_el2[regno], 0, 32);
   2420            trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value);
   2421        }
   2422    } else {
   2423        value = cs->ich_lr_el2[regno];
   2424        trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value);
   2425    }
   2426
   2427    return value;
   2428}
   2429
   2430static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2431                         uint64_t value)
   2432{
   2433    GICv3CPUState *cs = icc_cs_from_env(env);
   2434    int regno = ri->opc2 | ((ri->crm & 1) << 3);
   2435
   2436    /* This write function handles all of:
   2437     * 64-bit writes to the whole LR
   2438     * 32-bit writes to the low half of the LR
   2439     * 32-bit writes to the high half of the LR
   2440     */
   2441    if (ri->state == ARM_CP_STATE_AA32) {
   2442        if (ri->crm >= 14) {
   2443            trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value);
   2444            value = deposit64(cs->ich_lr_el2[regno], 32, 32, value);
   2445        } else {
   2446            trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value);
   2447            value = deposit64(cs->ich_lr_el2[regno], 0, 32, value);
   2448        }
   2449    } else {
   2450        trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value);
   2451    }
   2452
   2453    /* Enforce RES0 bits in priority field */
   2454    if (cs->vpribits < 8) {
   2455        value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT,
   2456                          8 - cs->vpribits, 0);
   2457    }
   2458
   2459    cs->ich_lr_el2[regno] = value;
   2460    gicv3_cpuif_virt_update(cs);
   2461}
   2462
   2463static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2464{
   2465    GICv3CPUState *cs = icc_cs_from_env(env);
   2466    uint64_t value;
   2467
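           /* The list register count and the preemption/priority bit widths
            * come from the CPU's GIC properties; the other feature bits are
            * fixed for this implementation.
            */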
   2468    value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT)
   2469        | ICH_VTR_EL2_TDS | ICH_VTR_EL2_NV4 | ICH_VTR_EL2_A3V
   2470        | (1 << ICH_VTR_EL2_IDBITS_SHIFT)
   2471        | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT)
   2472        | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT);
   2473
   2474    trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
   2475    return value;
   2476}
   2477
   2478static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2479{
   2480    GICv3CPUState *cs = icc_cs_from_env(env);
   2481    uint64_t value = maintenance_interrupt_state(cs);
   2482
   2483    trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value);
   2484    return value;
   2485}
   2486
   2487static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2488{
   2489    GICv3CPUState *cs = icc_cs_from_env(env);
   2490    uint64_t value = eoi_maintenance_interrupt_state(cs, NULL);
   2491
   2492    trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value);
   2493    return value;
   2494}
   2495
   2496static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2497{
   2498    GICv3CPUState *cs = icc_cs_from_env(env);
   2499    uint64_t value = 0;
   2500    int i;
   2501
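           /* A list register's ELRSR bit is set when its state is Invalid and
            * it is not still awaiting an EOI maintenance interrupt (HW set or
            * EOI clear).
            */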
   2502    for (i = 0; i < cs->num_list_regs; i++) {
   2503        uint64_t lr = cs->ich_lr_el2[i];
   2504
   2505        if ((lr & ICH_LR_EL2_STATE_MASK) == 0 &&
   2506            ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) {
   2507            value |= (1 << i);
   2508        }
   2509    }
   2510
   2511    trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
   2512    return value;
   2513}
   2514
   2515static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = {
   2516    { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH,
   2517      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0,
   2518      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2519      .access = PL2_RW,
   2520      .readfn = ich_ap_read,
   2521      .writefn = ich_ap_write,
   2522    },
   2523    { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH,
   2524      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0,
   2525      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2526      .access = PL2_RW,
   2527      .readfn = ich_ap_read,
   2528      .writefn = ich_ap_write,
   2529    },
   2530    { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH,
   2531      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0,
   2532      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2533      .access = PL2_RW,
   2534      .readfn = ich_hcr_read,
   2535      .writefn = ich_hcr_write,
   2536    },
   2537    { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH,
   2538      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1,
   2539      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2540      .access = PL2_R,
   2541      .readfn = ich_vtr_read,
   2542    },
   2543    { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH,
   2544      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2,
   2545      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2546      .access = PL2_R,
   2547      .readfn = ich_misr_read,
   2548    },
   2549    { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH,
   2550      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3,
   2551      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2552      .access = PL2_R,
   2553      .readfn = ich_eisr_read,
   2554    },
   2555    { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH,
   2556      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5,
   2557      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2558      .access = PL2_R,
   2559      .readfn = ich_elrsr_read,
   2560    },
   2561    { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH,
   2562      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7,
   2563      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2564      .access = PL2_RW,
   2565      .readfn = ich_vmcr_read,
   2566      .writefn = ich_vmcr_write,
   2567    },
   2568    REGINFO_SENTINEL
   2569};
   2570
   2571static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
   2572    { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH,
   2573      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1,
   2574      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2575      .access = PL2_RW,
   2576      .readfn = ich_ap_read,
   2577      .writefn = ich_ap_write,
   2578    },
   2579    { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH,
   2580      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1,
   2581      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2582      .access = PL2_RW,
   2583      .readfn = ich_ap_read,
   2584      .writefn = ich_ap_write,
   2585    },
   2586    REGINFO_SENTINEL
   2587};
   2588
   2589static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
   2590    { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH,
   2591      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2,
   2592      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2593      .access = PL2_RW,
   2594      .readfn = ich_ap_read,
   2595      .writefn = ich_ap_write,
   2596    },
   2597    { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH,
   2598      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3,
   2599      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2600      .access = PL2_RW,
   2601      .readfn = ich_ap_read,
   2602      .writefn = ich_ap_write,
   2603    },
   2604    { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH,
   2605      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2,
   2606      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2607      .access = PL2_RW,
   2608      .readfn = ich_ap_read,
   2609      .writefn = ich_ap_write,
   2610    },
   2611    { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH,
   2612      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3,
   2613      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2614      .access = PL2_RW,
   2615      .readfn = ich_ap_read,
   2616      .writefn = ich_ap_write,
   2617    },
   2618    REGINFO_SENTINEL
   2619};
   2620
   2621static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
   2622{
   2623    GICv3CPUState *cs = opaque;
   2624
   2625    gicv3_cpuif_update(cs);
   2626}
   2627
   2628void gicv3_init_cpuif(GICv3State *s)
   2629{
   2630    /* Called from the GICv3 realize function; register our system
   2631     * registers with the CPU
   2632     */
   2633    int i;
   2634
   2635    for (i = 0; i < s->num_cpu; i++) {
   2636        ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
   2637        GICv3CPUState *cs = &s->cpu[i];
   2638
   2639        /* Note that we can't just use the GICv3CPUState as an opaque pointer
   2640         * in define_arm_cp_regs_with_opaque(), because when we're called back
   2641         * it might be with code translated by CPU 0 but run by CPU 1, in
   2642         * which case we'd get the wrong value.
   2643         * So instead we define the regs with no ri->opaque info, and
   2644         * get back to the GICv3CPUState from the CPUARMState.
   2645         */
   2646        define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
   2647        if (arm_feature(&cpu->env, ARM_FEATURE_EL2)
   2648            && cpu->gic_num_lrs) {
   2649            int j;
   2650
   2651            cs->num_list_regs = cpu->gic_num_lrs;
   2652            cs->vpribits = cpu->gic_vpribits;
   2653            cs->vprebits = cpu->gic_vprebits;
   2654
   2655            /* Check against architectural constraints: getting these
   2656             * wrong would be a bug in the CPU code defining these,
   2657             * and the implementation relies on them holding.
   2658             */
   2659            g_assert(cs->vprebits <= cs->vpribits);
   2660            g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
   2661            g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);
   2662
   2663            define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);
   2664
   2665            for (j = 0; j < cs->num_list_regs; j++) {
   2666                /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
   2667                 * are split into two cp15 regs, LR (the low part, with the
   2668                 * same encoding as the AArch64 LR) and LRC (the high part).
   2669                 */
   2670                ARMCPRegInfo lr_regset[] = {
   2671                    { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
   2672                      .opc0 = 3, .opc1 = 4, .crn = 12,
   2673                      .crm = 12 + (j >> 3), .opc2 = j & 7,
   2674                      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2675                      .access = PL2_RW,
   2676                      .readfn = ich_lr_read,
   2677                      .writefn = ich_lr_write,
   2678                    },
   2679                    { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
   2680                      .cp = 15, .opc1 = 4, .crn = 12,
   2681                      .crm = 14 + (j >> 3), .opc2 = j & 7,
   2682                      .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2683                      .access = PL2_RW,
   2684                      .readfn = ich_lr_read,
   2685                      .writefn = ich_lr_write,
   2686                    },
   2687                    REGINFO_SENTINEL
   2688                };
   2689                define_arm_cp_regs(cpu, lr_regset);
   2690            }
   2691            if (cs->vprebits >= 6) {
   2692                define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
   2693            }
   2694            if (cs->vprebits == 7) {
   2695                define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
   2696            }
   2697        }
   2698        arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
   2699    }
   2700}