cachepc-qemu

Fork of AMDESE/qemu with changes for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu
Log | Files | Refs | Submodules | LICENSE | sfeed.txt

arm_gicv3_common.c (19178B)


      1/*
      2 * ARM GICv3 support - common bits of emulated and KVM kernel model
      3 *
      4 * Copyright (c) 2012 Linaro Limited
      5 * Copyright (c) 2015 Huawei.
      6 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
      7 * Written by Peter Maydell
      8 * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
      9 *
     10 * This program is free software; you can redistribute it and/or modify
     11 * it under the terms of the GNU General Public License as published by
     12 * the Free Software Foundation, either version 2 of the License, or
     13 * (at your option) any later version.
     14 *
     15 * This program is distributed in the hope that it will be useful,
     16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
     17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     18 * GNU General Public License for more details.
     19 *
     20 * You should have received a copy of the GNU General Public License along
     21 * with this program; if not, see <http://www.gnu.org/licenses/>.
     22 */
     23
     24#include "qemu/osdep.h"
     25#include "qapi/error.h"
     26#include "qemu/module.h"
     27#include "hw/core/cpu.h"
     28#include "hw/intc/arm_gicv3_common.h"
     29#include "hw/qdev-properties.h"
     30#include "migration/vmstate.h"
     31#include "gicv3_internal.h"
     32#include "hw/arm/linux-boot-if.h"
     33#include "sysemu/kvm.h"
     34
     35
/*
 * Fix up incoming GICD bitmap state sent by a QEMU version that had the
 * KVM save/restore offset bug (see comment below). Runs on post_load;
 * a no-op when the source already sent correctly-placed data (flag set
 * to true by the "gicd_no_migration_shift_bug" subsection or by reset).
 */
static void gicv3_gicd_no_migration_shift_bug_post_load(GICv3State *cs)
{
    if (cs->gicd_no_migration_shift_bug) {
        return;
    }

    /* Older versions of QEMU had a bug in the handling of state save/restore
     * to the KVM GICv3: they got the offset in the bitmap arrays wrong,
     * so that instead of the data for external interrupts 32 and up
     * starting at bit position 32 in the bitmap, it started at bit
     * position 64. If we're receiving data from a QEMU with that bug,
     * we must move the data down into the right place.
     */
    /* GIC_INTERNAL / 8 is the byte offset of one extra 32-interrupt chunk;
     * memmove (not memcpy) because source and destination overlap.
     */
    memmove(cs->group, (uint8_t *)cs->group + GIC_INTERNAL / 8,
            sizeof(cs->group) - GIC_INTERNAL / 8);
    memmove(cs->grpmod, (uint8_t *)cs->grpmod + GIC_INTERNAL / 8,
            sizeof(cs->grpmod) - GIC_INTERNAL / 8);
    memmove(cs->enabled, (uint8_t *)cs->enabled + GIC_INTERNAL / 8,
            sizeof(cs->enabled) - GIC_INTERNAL / 8);
    memmove(cs->pending, (uint8_t *)cs->pending + GIC_INTERNAL / 8,
            sizeof(cs->pending) - GIC_INTERNAL / 8);
    memmove(cs->active, (uint8_t *)cs->active + GIC_INTERNAL / 8,
            sizeof(cs->active) - GIC_INTERNAL / 8);
    memmove(cs->edge_trigger, (uint8_t *)cs->edge_trigger + GIC_INTERNAL / 8,
            sizeof(cs->edge_trigger) - GIC_INTERNAL / 8);

    /*
     * While this new version QEMU doesn't have this kind of bug as we fix it,
     * so it needs to set the flag to true to indicate that and it's necessary
     * for next migration to work from this new version QEMU.
     */
    cs->gicd_no_migration_shift_bug = true;
}
     69
     70static int gicv3_pre_save(void *opaque)
     71{
     72    GICv3State *s = (GICv3State *)opaque;
     73    ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);
     74
     75    if (c->pre_save) {
     76        c->pre_save(s);
     77    }
     78
     79    return 0;
     80}
     81
     82static int gicv3_post_load(void *opaque, int version_id)
     83{
     84    GICv3State *s = (GICv3State *)opaque;
     85    ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);
     86
     87    gicv3_gicd_no_migration_shift_bug_post_load(s);
     88
     89    if (c->post_load) {
     90        c->post_load(s);
     91    }
     92    return 0;
     93}
     94
     95static bool virt_state_needed(void *opaque)
     96{
     97    GICv3CPUState *cs = opaque;
     98
     99    return cs->num_list_regs != 0;
    100}
    101
/*
 * Optional per-CPU subsection carrying the virtualization control
 * registers (ICH_*); only sent when the CPU has list registers
 * (see virt_state_needed).
 */
static const VMStateDescription vmstate_gicv3_cpu_virt = {
    .name = "arm_gicv3_cpu/virt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_state_needed,
    .fields = (VMStateField[]) {
        /* 3 security/group banks x 4 active-priority registers */
        VMSTATE_UINT64_2DARRAY(ich_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64(ich_hcr_el2, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(ich_lr_el2, GICv3CPUState, GICV3_LR_MAX),
        VMSTATE_UINT64(ich_vmcr_el2, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};
    115
    116static int vmstate_gicv3_cpu_pre_load(void *opaque)
    117{
    118    GICv3CPUState *cs = opaque;
    119
    120   /*
    121    * If the sre_el1 subsection is not transferred this
    122    * means SRE_EL1 is 0x7 (which might not be the same as
    123    * our reset value).
    124    */
    125    cs->icc_sre_el1 = 0x7;
    126    return 0;
    127}
    128
    129static bool icc_sre_el1_reg_needed(void *opaque)
    130{
    131    GICv3CPUState *cs = opaque;
    132
    133    return cs->icc_sre_el1 != 7;
    134}
    135
/*
 * Optional per-CPU subsection for ICC_SRE_EL1; omitted when the value
 * is the 0x7 default (see icc_sre_el1_reg_needed).
 */
const VMStateDescription vmstate_gicv3_cpu_sre_el1 = {
    .name = "arm_gicv3_cpu/sre_el1",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icc_sre_el1_reg_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(icc_sre_el1, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};
    146
/*
 * Migration description for one GICv3CPUState: the redistributor
 * (GICR_*) registers plus the CPU interface (ICC_*) registers.
 * Embedded per-CPU by vmstate_gicv3 via a VARRAY on GICv3State.cpu.
 */
static const VMStateDescription vmstate_gicv3_cpu = {
    .name = "arm_gicv3_cpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = vmstate_gicv3_cpu_pre_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(level, GICv3CPUState),
        VMSTATE_UINT32(gicr_ctlr, GICv3CPUState),
        /* Banked secure/non-secure status registers */
        VMSTATE_UINT32_ARRAY(gicr_statusr, GICv3CPUState, 2),
        VMSTATE_UINT32(gicr_waker, GICv3CPUState),
        VMSTATE_UINT64(gicr_propbaser, GICv3CPUState),
        VMSTATE_UINT64(gicr_pendbaser, GICv3CPUState),
        VMSTATE_UINT32(gicr_igroupr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ienabler0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ipendr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_iactiver0, GICv3CPUState),
        VMSTATE_UINT32(edge_trigger, GICv3CPUState),
        VMSTATE_UINT32(gicr_igrpmodr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_nsacr, GICv3CPUState),
        /* One priority byte per SGI/PPI */
        VMSTATE_UINT8_ARRAY(gicr_ipriorityr, GICv3CPUState, GIC_INTERNAL),
        VMSTATE_UINT64_ARRAY(icc_ctlr_el1, GICv3CPUState, 2),
        VMSTATE_UINT64(icc_pmr_el1, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(icc_bpr, GICv3CPUState, 3),
        VMSTATE_UINT64_2DARRAY(icc_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64_ARRAY(icc_igrpen, GICv3CPUState, 3),
        VMSTATE_UINT64(icc_ctlr_el3, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gicv3_cpu_virt,
        &vmstate_gicv3_cpu_sre_el1,
        NULL
    }
};
    181
    182static int gicv3_pre_load(void *opaque)
    183{
    184    GICv3State *cs = opaque;
    185
    186   /*
    187    * The gicd_no_migration_shift_bug flag is used for migration compatibility
    188    * for old version QEMU which may have the GICD bmp shift bug under KVM mode.
    189    * Strictly, what we want to know is whether the migration source is using
    190    * KVM. Since we don't have any way to determine that, we look at whether the
    191    * destination is using KVM; this is close enough because for the older QEMU
    192    * versions with this bug KVM -> TCG migration didn't work anyway. If the
    193    * source is a newer QEMU without this bug it will transmit the migration
    194    * subsection which sets the flag to true; otherwise it will remain set to
    195    * the value we select here.
    196    */
    197    if (kvm_enabled()) {
    198        cs->gicd_no_migration_shift_bug = false;
    199    }
    200
    201    return 0;
    202}
    203
    204static bool needed_always(void *opaque)
    205{
    206    return true;
    207}
    208
/*
 * Always-sent subsection that tells the destination the source does NOT
 * have the old GICD bitmap shift bug; its absence (old source) leaves
 * the flag at the value chosen by gicv3_pre_load.
 */
const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug = {
    .name = "arm_gicv3/gicd_no_migration_shift_bug",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = needed_always,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(gicd_no_migration_shift_bug, GICv3State),
        VMSTATE_END_OF_LIST()
    }
};
    219
/*
 * Top-level migration description for the GICv3: distributor (GICD_*)
 * state plus one vmstate_gicv3_cpu element per CPU.
 */
static const VMStateDescription vmstate_gicv3 = {
    .name = "arm_gicv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = gicv3_pre_load,
    .pre_save = gicv3_pre_save,
    .post_load = gicv3_post_load,
    .priority = MIG_PRI_GICV3,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(gicd_ctlr, GICv3State),
        VMSTATE_UINT32_ARRAY(gicd_statusr, GICv3State, 2),
        /* Per-interrupt bitmaps, one bit per IRQ */
        VMSTATE_UINT32_ARRAY(group, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(grpmod, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(enabled, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(pending, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(active, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(level, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(edge_trigger, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT8_ARRAY(gicd_ipriority, GICv3State, GICV3_MAXIRQ),
        VMSTATE_UINT64_ARRAY(gicd_irouter, GICv3State, GICV3_MAXIRQ),
        /* GICD_NSACR packs 2 bits per interrupt, i.e. 16 per word */
        VMSTATE_UINT32_ARRAY(gicd_nsacr, GICv3State,
                             DIV_ROUND_UP(GICV3_MAXIRQ, 16)),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, GICv3State, num_cpu,
                                             vmstate_gicv3_cpu, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gicv3_gicd_no_migration_shift_bug,
        NULL
    }
};
    251
/*
 * Wire up the GIC's GPIO inputs, per-CPU output IRQ/FIQ lines and MMIO
 * regions (one distributor region plus one region per redistributor
 * range). @ops, when non-NULL, is a two-element array: ops[0] for the
 * distributor, ops[1] for the redistributors. Sets @errp and returns
 * early if the redistributor regions cannot hold all vcpus.
 */
void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
                              const MemoryRegionOps *ops, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(s);
    int rdist_capacity = 0;
    int i;

    for (i = 0; i < s->nb_redist_regions; i++) {
        rdist_capacity += s->redist_region_count[i];
    }
    if (rdist_capacity < s->num_cpu) {
        error_setg(errp, "Capacity of the redist regions(%d) "
                   "is less than number of vcpus(%d)",
                   rdist_capacity, s->num_cpu);
        return;
    }

    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
     * GPIO array layout is thus:
     *  [0..N-1] spi
     *  [N..N+31] PPIs for CPU 0
     *  [N+32..N+63] PPIs for CPU 1
     *   ...
     */
    i = s->num_irq - GIC_INTERNAL + GIC_INTERNAL * s->num_cpu;
    qdev_init_gpio_in(DEVICE(s), handler, i);

    /* Output lines are registered in blocks: all IRQs, then all FIQs,
     * then all vIRQs, then all vFIQs -- callers rely on this ordering
     * when connecting sysbus IRQs.
     */
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_irq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_fiq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_virq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_vfiq);
    }

    memory_region_init_io(&s->iomem_dist, OBJECT(s), ops, s,
                          "gicv3_dist", 0x10000);
    sysbus_init_mmio(sbd, &s->iomem_dist);

    s->iomem_redist = g_new0(MemoryRegion, s->nb_redist_regions);
    for (i = 0; i < s->nb_redist_regions; i++) {
        char *name = g_strdup_printf("gicv3_redist_region[%d]", i);

        memory_region_init_io(&s->iomem_redist[i], OBJECT(s),
                              ops ? &ops[1] : NULL, s, name,
                              s->redist_region_count[i] * GICV3_REDIST_SIZE);
        sysbus_init_mmio(sbd, &s->iomem_redist[i]);
        g_free(name);
    }
}
    307
/*
 * Realize handler shared by the emulated and KVM GICv3 models: validate
 * the configuration properties, allocate the per-CPU state array and
 * pre-compute each redistributor's GICR_TYPER value. Errors are
 * reported via @errp and abort realization.
 */
static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    int i;

    /* revision property is actually reserved and currently used only in order
     * to keep the interface compatible with GICv2 code, avoiding extra
     * conditions. However, in future it could be used, for example, if we
     * implement GICv4.
     */
    if (s->revision != 3) {
        error_setg(errp, "unsupported GIC revision %d", s->revision);
        return;
    }

    if (s->num_irq > GICV3_MAXIRQ) {
        error_setg(errp,
                   "requested %u interrupt lines exceeds GIC maximum %d",
                   s->num_irq, GICV3_MAXIRQ);
        return;
    }
    if (s->num_irq < GIC_INTERNAL) {
        error_setg(errp,
                   "requested %u interrupt lines is below GIC minimum %d",
                   s->num_irq, GIC_INTERNAL);
        return;
    }

    /* ITLinesNumber is represented as (N / 32) - 1, so this is an
     * implementation imposed restriction, not an architectural one,
     * so we don't have to deal with bitfields where only some of the
     * bits in a 32-bit word should be valid.
     */
    if (s->num_irq % 32) {
        error_setg(errp,
                   "%d interrupt lines unsupported: not divisible by 32",
                   s->num_irq);
        return;
    }

    /* LPIs need a DMA-capable memory region; the "sysmem" link property
     * must have been set by the board code.
     */
    if (s->lpi_enable && !s->dma) {
        error_setg(errp, "Redist-ITS: Guest 'sysmem' reference link not set");
        return;
    }

    s->cpu = g_new0(GICv3CPUState, s->num_cpu);

    for (i = 0; i < s->num_cpu; i++) {
        CPUState *cpu = qemu_get_cpu(i);
        uint64_t cpu_affid;
        int last;

        s->cpu[i].cpu = cpu;
        s->cpu[i].gic = s;
        /* Store GICv3CPUState in CPUARMState gicv3state pointer */
        gicv3_set_gicv3state(cpu, &s->cpu[i]);

        /* Pre-construct the GICR_TYPER:
         * For our implementation:
         *  Top 32 bits are the affinity value of the associated CPU
         *  CommonLPIAff == 01 (redistributors with same Aff3 share LPI table)
         *  Processor_Number == CPU index starting from 0
         *  DPGS == 0 (GICR_CTLR.DPG* not supported)
         *  Last == 1 if this is the last redistributor in a series of
         *            contiguous redistributor pages
         *  DirectLPI == 0 (direct injection of LPIs not supported)
         *  VLPIS == 0 (virtual LPIs not supported)
         *  PLPIS == 0 (physical LPIs not supported)
         */
        cpu_affid = object_property_get_uint(OBJECT(cpu), "mp-affinity", NULL);
        last = (i == s->num_cpu - 1);

        /* The CPU mp-affinity property is in MPIDR register format; squash
         * the affinity bytes into 32 bits as the GICR_TYPER has them.
         */
        cpu_affid = ((cpu_affid & 0xFF00000000ULL) >> 8) |
                     (cpu_affid & 0xFFFFFF);
        s->cpu[i].gicr_typer = (cpu_affid << 32) |
            (1 << 24) |    /* CommonLPIAff == 01 */
            (i << 8) |     /* Processor_Number */
            (last << 4);   /* Last */

        if (s->lpi_enable) {
            s->cpu[i].gicr_typer |= GICR_TYPER_PLPIS;
        }
    }
}
    395
    396static void arm_gicv3_finalize(Object *obj)
    397{
    398    GICv3State *s = ARM_GICV3_COMMON(obj);
    399
    400    g_free(s->redist_region_count);
    401}
    402
/*
 * Device reset handler: put all per-CPU redistributor state and the
 * distributor state back to their architectural (or boot-firmware
 * emulating) reset values. CPU interface (ICC_*) state is deliberately
 * not touched here -- see the comment inside the loop.
 */
static void arm_gicv3_common_reset(DeviceState *dev)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        cs->level = 0;
        cs->gicr_ctlr = 0;
        cs->gicr_statusr[GICV3_S] = 0;
        cs->gicr_statusr[GICV3_NS] = 0;
        /* CPUs come out of reset asleep from the redistributor's view */
        cs->gicr_waker = GICR_WAKER_ProcessorSleep | GICR_WAKER_ChildrenAsleep;
        cs->gicr_propbaser = 0;
        cs->gicr_pendbaser = 0;
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        if (s->irq_reset_nonsecure) {
            cs->gicr_igroupr0 = 0xffffffff;
        } else {
            cs->gicr_igroupr0 = 0;
        }

        cs->gicr_ienabler0 = 0;
        cs->gicr_ipendr0 = 0;
        cs->gicr_iactiver0 = 0;
        /* SGIs (bits 0..15) are always edge-triggered */
        cs->edge_trigger = 0xffff;
        cs->gicr_igrpmodr0 = 0;
        cs->gicr_nsacr = 0;
        memset(cs->gicr_ipriorityr, 0, sizeof(cs->gicr_ipriorityr));

        /* 0xff == lowest priority, i.e. no pending interrupt */
        cs->hppi.prio = 0xff;
        cs->hpplpi.prio = 0xff;

        /* State in the CPU interface must *not* be reset here, because it
         * is part of the CPU's reset domain, not the GIC device's.
         */
    }

    /* For our implementation affinity routing is always enabled */
    if (s->security_extn) {
        s->gicd_ctlr = GICD_CTLR_ARE_S | GICD_CTLR_ARE_NS;
    } else {
        s->gicd_ctlr = GICD_CTLR_DS | GICD_CTLR_ARE;
    }

    s->gicd_statusr[GICV3_S] = 0;
    s->gicd_statusr[GICV3_NS] = 0;

    memset(s->group, 0, sizeof(s->group));
    memset(s->grpmod, 0, sizeof(s->grpmod));
    memset(s->enabled, 0, sizeof(s->enabled));
    memset(s->pending, 0, sizeof(s->pending));
    memset(s->active, 0, sizeof(s->active));
    memset(s->level, 0, sizeof(s->level));
    memset(s->edge_trigger, 0, sizeof(s->edge_trigger));
    memset(s->gicd_ipriority, 0, sizeof(s->gicd_ipriority));
    memset(s->gicd_irouter, 0, sizeof(s->gicd_irouter));
    memset(s->gicd_nsacr, 0, sizeof(s->gicd_nsacr));
    /* GICD_IROUTER are UNKNOWN at reset so in theory the guest must
     * write these to get sane behaviour and we need not populate the
     * pointer cache here; however having the cache be different for
     * "happened to be 0 from reset" and "guest wrote 0" would be
     * too confusing.
     */
    gicv3_cache_all_target_cpustates(s);

    if (s->irq_reset_nonsecure) {
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        for (i = GIC_INTERNAL; i < s->num_irq; i++) {
            gicv3_gicd_group_set(s, i);
        }
    }
    /* A freshly reset (i.e. non-migrated) GIC never has the shift bug */
    s->gicd_no_migration_shift_bug = true;
}
    485
    486static void arm_gic_common_linux_init(ARMLinuxBootIf *obj,
    487                                      bool secure_boot)
    488{
    489    GICv3State *s = ARM_GICV3_COMMON(obj);
    490
    491    if (s->security_extn && !secure_boot) {
    492        /* We're directly booting a kernel into NonSecure. If this GIC
    493         * implements the security extensions then we must configure it
    494         * to have all the interrupts be NonSecure (this is a job that
    495         * is done by the Secure boot firmware in real hardware, and in
    496         * this mode QEMU is acting as a minimalist firmware-and-bootloader
    497         * equivalent).
    498         */
    499        s->irq_reset_nonsecure = true;
    500    }
    501}
    502
/* User-configurable qdev properties shared by all GICv3 models. */
static Property arm_gicv3_common_properties[] = {
    DEFINE_PROP_UINT32("num-cpu", GICv3State, num_cpu, 1),
    /* Default 32 == GIC_INTERNAL, i.e. no SPIs at all */
    DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32),
    /* Reserved; only revision 3 is accepted by realize */
    DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
    DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0),
    DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
    /* Array of redistributor counts, one entry per MMIO region */
    DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions,
                      redist_region_count, qdev_prop_uint32, uint32_t),
    DEFINE_PROP_LINK("sysmem", GICv3State, dma, TYPE_MEMORY_REGION,
                     MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};
    515
    516static void arm_gicv3_common_class_init(ObjectClass *klass, void *data)
    517{
    518    DeviceClass *dc = DEVICE_CLASS(klass);
    519    ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_CLASS(klass);
    520
    521    dc->reset = arm_gicv3_common_reset;
    522    dc->realize = arm_gicv3_common_realize;
    523    device_class_set_props(dc, arm_gicv3_common_properties);
    524    dc->vmsd = &vmstate_gicv3;
    525    albifc->arm_linux_init = arm_gic_common_linux_init;
    526}
    527
/*
 * QOM type info for the abstract base class shared by the emulated and
 * KVM GICv3 devices; concrete subclasses provide pre_save/post_load and
 * the MMIO implementations.
 */
static const TypeInfo arm_gicv3_common_type = {
    .name = TYPE_ARM_GICV3_COMMON,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(GICv3State),
    .class_size = sizeof(ARMGICv3CommonClass),
    .class_init = arm_gicv3_common_class_init,
    .instance_finalize = arm_gicv3_finalize,
    .abstract = true,
    .interfaces = (InterfaceInfo []) {
        { TYPE_ARM_LINUX_BOOT_IF },
        { },
    },
};
    541
/* Register the abstract GICv3 common type with the QOM type system. */
static void register_types(void)
{
    type_register_static(&arm_gicv3_common_type);
}
    546
    547type_init(register_types)