cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu
Log | Files | Refs | Submodules | LICENSE | sfeed.txt

cpu64.c (37122B)


      1/*
      2 * QEMU AArch64 CPU
      3 *
      4 * Copyright (c) 2013 Linaro Ltd
      5 *
      6 * This program is free software; you can redistribute it and/or
      7 * modify it under the terms of the GNU General Public License
      8 * as published by the Free Software Foundation; either version 2
      9 * of the License, or (at your option) any later version.
     10 *
     11 * This program is distributed in the hope that it will be useful,
     12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
     13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     14 * GNU General Public License for more details.
     15 *
     16 * You should have received a copy of the GNU General Public License
     17 * along with this program; if not, see
     18 * <http://www.gnu.org/licenses/gpl-2.0.html>
     19 */
     20
     21#include "qemu/osdep.h"
     22#include "qapi/error.h"
     23#include "cpu.h"
     24#ifdef CONFIG_TCG
     25#include "hw/core/tcg-cpu-ops.h"
     26#endif /* CONFIG_TCG */
     27#include "qemu/module.h"
     28#if !defined(CONFIG_USER_ONLY)
     29#include "hw/loader.h"
     30#endif
     31#include "sysemu/kvm.h"
     32#include "kvm_arm.h"
     33#include "qapi/visitor.h"
     34#include "hw/qdev-properties.h"
     35
     36
     37#ifndef CONFIG_USER_ONLY
     38static uint64_t a57_a53_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
     39{
     40    ARMCPU *cpu = env_archcpu(env);
     41
     42    /* Number of cores is in [25:24]; otherwise we RAZ */
     43    return (cpu->core_count - 1) << 24;
     44}
     45#endif
     46
/*
 * Implementation-defined system registers common to the Cortex-A72,
 * Cortex-A57 and Cortex-A53 models.  Except for L2CTLR, whose core-count
 * field is synthesized at read time by a57_a53_l2ctlr_read(), all of these
 * are modelled as constants: reads return 0 and writes are ignored.  Each
 * register appears in both its AArch64 (_EL1) and AArch32 (cp15) encodings.
 */
static const ARMCPRegInfo cortex_a72_a57_a53_cp_reginfo[] = {
#ifndef CONFIG_USER_ONLY
    /* L2 Control Register: read reports core count, writes are ignored. */
    { .name = "L2CTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 2,
      .access = PL1_RW, .readfn = a57_a53_l2ctlr_read,
      .writefn = arm_cp_write_ignore },
    { .name = "L2CTLR",
      .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 2,
      .access = PL1_RW, .readfn = a57_a53_l2ctlr_read,
      .writefn = arm_cp_write_ignore },
#endif
    /* L2 Extended Control Register: constant zero. */
    { .name = "L2ECTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "L2ECTLR",
      .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* L2 Auxiliary Control Register: constant zero. */
    { .name = "L2ACTLR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* CPU Auxiliary Control Register: constant zero. */
    { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUACTLR",
      .cp = 15, .opc1 = 0, .crm = 15,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    /* CPU Extended Control Register: constant zero. */
    { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUECTLR",
      .cp = 15, .opc1 = 1, .crm = 15,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    /* CPU Memory Error Syndrome Register: constant zero. */
    { .name = "CPUMERRSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUMERRSR",
      .cp = 15, .opc1 = 2, .crm = 15,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    /* L2 Memory Error Syndrome Register: constant zero. */
    { .name = "L2MERRSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "L2MERRSR",
      .cp = 15, .opc1 = 3, .crm = 15,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
     93
/*
 * QOM instance init for the Cortex-A57 CPU model.  The hex values below
 * are transcriptions of the corresponding ID/feature registers of the
 * hardware part (MIDR 0x411fd070); they are data, not computed values,
 * and must not be "simplified".
 */
static void aarch64_a57_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a57";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    /* Under KVM, ask the host for its native Cortex-A57 target. */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57;
    cpu->midr = 0x411fd070;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034070;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10101105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_isar6 = 0;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001124;
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
    cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    /* Impdef registers shared between A72/A57/A53 (defined above). */
    define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
}
    146
/*
 * QOM instance init for the Cortex-A53 CPU model.  As with the A57 init
 * above, the hex values are transcribed hardware ID register contents
 * (MIDR 0x410fd034) and are data, not computed values.
 */
static void aarch64_a53_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a53";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    /* Under KVM, ask the host for its native Cortex-A53 target. */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53;
    cpu->midr = 0x410fd034;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034070;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x84448004; /* L1Ip = VIPT */
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10101105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_isar6 = 0;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
    cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    /* Impdef registers shared between A72/A57/A53 (defined above). */
    define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
}
    199
/*
 * QOM instance init for the Cortex-A72 CPU model.  Values are transcribed
 * hardware ID register contents (MIDR 0x410fd083).  Note there is no
 * kvm_target assignment here, unlike the A57/A53 init fns above.
 */
static void aarch64_a72_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a72";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->midr = 0x410fd083;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034080;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10201105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    /*
     * NOTE(review): unlike the A57/A53 init fns, id_isar6 is not set here;
     * presumably it relies on zero-initialization of the object -- confirm
     * against upstream before relying on it.
     */
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001124;
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
    cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    /* Impdef registers shared between A72/A57/A53 (defined above). */
    define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
}
    250
/*
 * Resolve the user-supplied SVE configuration (sve=on/off, sve<N>
 * per-length booleans, sve-max-vq) together with the CPU-model/host
 * supported set (cpu->sve_vq_supported) into a final, validated
 * cpu->sve_vq_map and cpu->sve_max_vq.  On any inconsistency an error
 * is reported via @errp and the function returns early, leaving the
 * configuration unfinalized.
 */
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
{
    /*
     * If any vector lengths are explicitly enabled with sve<N> properties,
     * then all other lengths are implicitly disabled.  If sve-max-vq is
     * specified then it is the same as explicitly enabling all lengths
     * up to and including the specified maximum, which means all larger
     * lengths will be implicitly disabled.  If no sve<N> properties
     * are enabled and sve-max-vq is not specified, then all lengths not
     * explicitly disabled will be enabled.  Additionally, all power-of-two
     * vector lengths less than the maximum enabled length will be
     * automatically enabled and all vector lengths larger than the largest
     * disabled power-of-two vector length will be automatically disabled.
     * Errors are generated if the user provided input that interferes with
     * any of the above.  Finally, if SVE is not disabled, then at least one
     * vector length must be enabled.
     */
    DECLARE_BITMAP(tmp, ARM_MAX_VQ);
    uint32_t vq, max_vq = 0;

    /*
     * CPU models specify a set of supported vector lengths which are
     * enabled by default.  Attempting to enable any vector length not set
     * in the supported bitmap results in an error.  When KVM is enabled we
     * fetch the supported bitmap from the host.
     */
    if (kvm_enabled() && kvm_arm_sve_supported()) {
        kvm_arm_sve_get_vls(CPU(cpu), cpu->sve_vq_supported);
    } else if (kvm_enabled()) {
        /* KVM without SVE support: SVE must already be off. */
        assert(!cpu_isar_feature(aa64_sve, cpu));
    }

    /*
     * Process explicit sve<N> properties.
     * From the properties, sve_vq_map<N> implies sve_vq_init<N>.
     * Check first for any sve<N> enabled.
     */
    if (!bitmap_empty(cpu->sve_vq_map, ARM_MAX_VQ)) {
        max_vq = find_last_bit(cpu->sve_vq_map, ARM_MAX_VQ) + 1;

        /* An explicitly enabled length must not exceed sve-max-vq. */
        if (cpu->sve_max_vq && max_vq > cpu->sve_max_vq) {
            error_setg(errp, "cannot enable sve%d", max_vq * 128);
            error_append_hint(errp, "sve%d is larger than the maximum vector "
                              "length, sve-max-vq=%d (%d bits)\n",
                              max_vq * 128, cpu->sve_max_vq,
                              cpu->sve_max_vq * 128);
            return;
        }

        if (kvm_enabled()) {
            /*
             * For KVM we have to automatically enable all supported
             * uninitialized lengths, even when the smaller lengths are not
             * all powers-of-two.
             */
            bitmap_andnot(tmp, cpu->sve_vq_supported, cpu->sve_vq_init, max_vq);
            bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
        } else {
            /* Propagate enabled bits down through required powers-of-two. */
            for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
                if (!test_bit(vq - 1, cpu->sve_vq_init)) {
                    set_bit(vq - 1, cpu->sve_vq_map);
                }
            }
        }
    } else if (cpu->sve_max_vq == 0) {
        /*
         * No explicit bits enabled, and no implicit bits from sve-max-vq.
         */
        if (!cpu_isar_feature(aa64_sve, cpu)) {
            /* SVE is disabled and so are all vector lengths.  Good. */
            return;
        }

        if (kvm_enabled()) {
            /* Disabling a supported length disables all larger lengths. */
            for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
                if (test_bit(vq - 1, cpu->sve_vq_init) &&
                    test_bit(vq - 1, cpu->sve_vq_supported)) {
                    break;
                }
            }
        } else {
            /* Disabling a power-of-two disables all larger lengths. */
            for (vq = 1; vq <= ARM_MAX_VQ; vq <<= 1) {
                if (test_bit(vq - 1, cpu->sve_vq_init)) {
                    break;
                }
            }
        }

        /* vq is now the smallest explicitly-disabled length (or past max). */
        max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
        bitmap_andnot(cpu->sve_vq_map, cpu->sve_vq_supported,
                      cpu->sve_vq_init, max_vq);
        if (max_vq == 0 || bitmap_empty(cpu->sve_vq_map, max_vq)) {
            error_setg(errp, "cannot disable sve%d", vq * 128);
            error_append_hint(errp, "Disabling sve%d results in all "
                              "vector lengths being disabled.\n",
                              vq * 128);
            error_append_hint(errp, "With SVE enabled, at least one "
                              "vector length must be enabled.\n");
            return;
        }

        max_vq = find_last_bit(cpu->sve_vq_map, max_vq) + 1;
    }

    /*
     * Process the sve-max-vq property.
     * Note that we know from the above that no bit above
     * sve-max-vq is currently set.
     */
    if (cpu->sve_max_vq != 0) {
        max_vq = cpu->sve_max_vq;

        /* The maximum length itself must not have been disabled. */
        if (!test_bit(max_vq - 1, cpu->sve_vq_map) &&
            test_bit(max_vq - 1, cpu->sve_vq_init)) {
            error_setg(errp, "cannot disable sve%d", max_vq * 128);
            error_append_hint(errp, "The maximum vector length must be "
                              "enabled, sve-max-vq=%d (%d bits)\n",
                              max_vq, max_vq * 128);
            return;
        }

        /* Set all bits not explicitly set within sve-max-vq. */
        bitmap_complement(tmp, cpu->sve_vq_init, max_vq);
        bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
    }

    /*
     * We should know what max-vq is now.  Also, as we're done
     * manipulating sve-vq-map, we ensure any bits above max-vq
     * are clear, just in case anybody looks.
     */
    assert(max_vq != 0);
    bitmap_clear(cpu->sve_vq_map, max_vq, ARM_MAX_VQ - max_vq);

    /* Ensure the set of lengths matches what is supported. */
    bitmap_xor(tmp, cpu->sve_vq_map, cpu->sve_vq_supported, max_vq);
    if (!bitmap_empty(tmp, max_vq)) {
        /* vq is the largest mismatching length; diagnose which way. */
        vq = find_last_bit(tmp, max_vq) + 1;
        if (test_bit(vq - 1, cpu->sve_vq_map)) {
            /* Enabled but not supported by the CPU model / host. */
            if (cpu->sve_max_vq) {
                error_setg(errp, "cannot set sve-max-vq=%d", cpu->sve_max_vq);
                error_append_hint(errp, "This CPU does not support "
                                  "the vector length %d-bits.\n", vq * 128);
                error_append_hint(errp, "It may not be possible to use "
                                  "sve-max-vq with this CPU. Try "
                                  "using only sve<N> properties.\n");
            } else {
                error_setg(errp, "cannot enable sve%d", vq * 128);
                error_append_hint(errp, "This CPU does not support "
                                  "the vector length %d-bits.\n", vq * 128);
            }
            return;
        } else {
            /* Supported but disabled: only sometimes an error. */
            if (kvm_enabled()) {
                error_setg(errp, "cannot disable sve%d", vq * 128);
                error_append_hint(errp, "The KVM host requires all "
                                  "supported vector lengths smaller "
                                  "than %d bits to also be enabled.\n",
                                  max_vq * 128);
                return;
            } else {
                /* Ensure all required powers-of-two are enabled. */
                for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
                    if (!test_bit(vq - 1, cpu->sve_vq_map)) {
                        error_setg(errp, "cannot disable sve%d", vq * 128);
                        error_append_hint(errp, "sve%d is required as it "
                                          "is a power-of-two length smaller "
                                          "than the maximum, sve%d\n",
                                          vq * 128, max_vq * 128);
                        return;
                    }
                }
            }
        }
    }

    /*
     * Now that we validated all our vector lengths, the only question
     * left to answer is if we even want SVE at all.
     */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        error_setg(errp, "cannot enable sve%d", max_vq * 128);
        error_append_hint(errp, "SVE must be enabled to enable vector "
                          "lengths.\n");
        error_append_hint(errp, "Add sve=on to the CPU property list.\n");
        return;
    }

    /* From now on sve_max_vq is the actual maximum supported length. */
    cpu->sve_max_vq = max_vq;
}
    444
    445static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name,
    446                                   void *opaque, Error **errp)
    447{
    448    ARMCPU *cpu = ARM_CPU(obj);
    449    uint32_t value;
    450
    451    /* All vector lengths are disabled when SVE is off. */
    452    if (!cpu_isar_feature(aa64_sve, cpu)) {
    453        value = 0;
    454    } else {
    455        value = cpu->sve_max_vq;
    456    }
    457    visit_type_uint32(v, name, &value, errp);
    458}
    459
    460static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
    461                                   void *opaque, Error **errp)
    462{
    463    ARMCPU *cpu = ARM_CPU(obj);
    464    uint32_t max_vq;
    465
    466    if (!visit_type_uint32(v, name, &max_vq, errp)) {
    467        return;
    468    }
    469
    470    if (kvm_enabled() && !kvm_arm_sve_supported()) {
    471        error_setg(errp, "cannot set sve-max-vq");
    472        error_append_hint(errp, "SVE not supported by KVM on this host\n");
    473        return;
    474    }
    475
    476    if (max_vq == 0 || max_vq > ARM_MAX_VQ) {
    477        error_setg(errp, "unsupported SVE vector length");
    478        error_append_hint(errp, "Valid sve-max-vq in range [1-%d]\n",
    479                          ARM_MAX_VQ);
    480        return;
    481    }
    482
    483    cpu->sve_max_vq = max_vq;
    484}
    485
    486/*
    487 * Note that cpu_arm_get/set_sve_vq cannot use the simpler
    488 * object_property_add_bool interface because they make use
    489 * of the contents of "name" to determine which bit on which
    490 * to operate.
    491 */
    492static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name,
    493                               void *opaque, Error **errp)
    494{
    495    ARMCPU *cpu = ARM_CPU(obj);
    496    uint32_t vq = atoi(&name[3]) / 128;
    497    bool value;
    498
    499    /* All vector lengths are disabled when SVE is off. */
    500    if (!cpu_isar_feature(aa64_sve, cpu)) {
    501        value = false;
    502    } else {
    503        value = test_bit(vq - 1, cpu->sve_vq_map);
    504    }
    505    visit_type_bool(v, name, &value, errp);
    506}
    507
    508static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
    509                               void *opaque, Error **errp)
    510{
    511    ARMCPU *cpu = ARM_CPU(obj);
    512    uint32_t vq = atoi(&name[3]) / 128;
    513    bool value;
    514
    515    if (!visit_type_bool(v, name, &value, errp)) {
    516        return;
    517    }
    518
    519    if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
    520        error_setg(errp, "cannot enable %s", name);
    521        error_append_hint(errp, "SVE not supported by KVM on this host\n");
    522        return;
    523    }
    524
    525    if (value) {
    526        set_bit(vq - 1, cpu->sve_vq_map);
    527    } else {
    528        clear_bit(vq - 1, cpu->sve_vq_map);
    529    }
    530    set_bit(vq - 1, cpu->sve_vq_init);
    531}
    532
    533static bool cpu_arm_get_sve(Object *obj, Error **errp)
    534{
    535    ARMCPU *cpu = ARM_CPU(obj);
    536    return cpu_isar_feature(aa64_sve, cpu);
    537}
    538
    539static void cpu_arm_set_sve(Object *obj, bool value, Error **errp)
    540{
    541    ARMCPU *cpu = ARM_CPU(obj);
    542    uint64_t t;
    543
    544    if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
    545        error_setg(errp, "'sve' feature not supported by KVM on this host");
    546        return;
    547    }
    548
    549    t = cpu->isar.id_aa64pfr0;
    550    t = FIELD_DP64(t, ID_AA64PFR0, SVE, value);
    551    cpu->isar.id_aa64pfr0 = t;
    552}
    553
    554#ifdef CONFIG_USER_ONLY
    555/* Mirror linux /proc/sys/abi/sve_default_vector_length. */
    556static void cpu_arm_set_sve_default_vec_len(Object *obj, Visitor *v,
    557                                            const char *name, void *opaque,
    558                                            Error **errp)
    559{
    560    ARMCPU *cpu = ARM_CPU(obj);
    561    int32_t default_len, default_vq, remainder;
    562
    563    if (!visit_type_int32(v, name, &default_len, errp)) {
    564        return;
    565    }
    566
    567    /* Undocumented, but the kernel allows -1 to indicate "maximum". */
    568    if (default_len == -1) {
    569        cpu->sve_default_vq = ARM_MAX_VQ;
    570        return;
    571    }
    572
    573    default_vq = default_len / 16;
    574    remainder = default_len % 16;
    575
    576    /*
    577     * Note that the 512 max comes from include/uapi/asm/sve_context.h
    578     * and is the maximum architectural width of ZCR_ELx.LEN.
    579     */
    580    if (remainder || default_vq < 1 || default_vq > 512) {
    581        error_setg(errp, "cannot set sve-default-vector-length");
    582        if (remainder) {
    583            error_append_hint(errp, "Vector length not a multiple of 16\n");
    584        } else if (default_vq < 1) {
    585            error_append_hint(errp, "Vector length smaller than 16\n");
    586        } else {
    587            error_append_hint(errp, "Vector length larger than %d\n",
    588                              512 * 16);
    589        }
    590        return;
    591    }
    592
    593    cpu->sve_default_vq = default_vq;
    594}
    595
    596static void cpu_arm_get_sve_default_vec_len(Object *obj, Visitor *v,
    597                                            const char *name, void *opaque,
    598                                            Error **errp)
    599{
    600    ARMCPU *cpu = ARM_CPU(obj);
    601    int32_t value = cpu->sve_default_vq * 16;
    602
    603    visit_type_int32(v, name, &value, errp);
    604}
    605#endif
    606
    607void aarch64_add_sve_properties(Object *obj)
    608{
    609    uint32_t vq;
    610
    611    object_property_add_bool(obj, "sve", cpu_arm_get_sve, cpu_arm_set_sve);
    612
    613    for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
    614        char name[8];
    615        sprintf(name, "sve%d", vq * 128);
    616        object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
    617                            cpu_arm_set_sve_vq, NULL, NULL);
    618    }
    619
    620#ifdef CONFIG_USER_ONLY
    621    /* Mirror linux /proc/sys/abi/sve_default_vector_length. */
    622    object_property_add(obj, "sve-default-vector-length", "int32",
    623                        cpu_arm_get_sve_default_vec_len,
    624                        cpu_arm_set_sve_default_vec_len, NULL, NULL);
    625#endif
    626}
    627
    628void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
    629{
    630    int arch_val = 0, impdef_val = 0;
    631    uint64_t t;
    632
    633    /* TODO: Handle HaveEnhancedPAC, HaveEnhancedPAC2, HaveFPAC. */
    634    if (cpu->prop_pauth) {
    635        if (cpu->prop_pauth_impdef) {
    636            impdef_val = 1;
    637        } else {
    638            arch_val = 1;
    639        }
    640    } else if (cpu->prop_pauth_impdef) {
    641        error_setg(errp, "cannot enable pauth-impdef without pauth");
    642        error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
    643    }
    644
    645    t = cpu->isar.id_aa64isar1;
    646    t = FIELD_DP64(t, ID_AA64ISAR1, APA, arch_val);
    647    t = FIELD_DP64(t, ID_AA64ISAR1, GPA, arch_val);
    648    t = FIELD_DP64(t, ID_AA64ISAR1, API, impdef_val);
    649    t = FIELD_DP64(t, ID_AA64ISAR1, GPI, impdef_val);
    650    cpu->isar.id_aa64isar1 = t;
    651}
    652
/*
 * Pointer-authentication QOM properties, consumed by
 * arm_cpu_pauth_finalize(): "pauth" defaults to on, "pauth-impdef"
 * (use the impdef rather than architected algorithm) defaults to off.
 */
static Property arm_cpu_pauth_property =
    DEFINE_PROP_BOOL("pauth", ARMCPU, prop_pauth, true);
static Property arm_cpu_pauth_impdef_property =
    DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false);
    657
    658/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
    659 * otherwise, a CPU with as many features enabled as our emulation supports.
    660 * The version of '-cpu max' for qemu-system-arm is defined in cpu.c;
    661 * this only needs to handle 64 bits.
    662 */
    663static void aarch64_max_initfn(Object *obj)
    664{
    665    ARMCPU *cpu = ARM_CPU(obj);
    666
    667    if (kvm_enabled()) {
    668        kvm_arm_set_cpu_features_from_host(cpu);
    669    } else {
    670        uint64_t t;
    671        uint32_t u;
    672        aarch64_a57_initfn(obj);
    673
    674        /*
    675         * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real
    676         * one and try to apply errata workarounds or use impdef features we
    677         * don't provide.
    678         * An IMPLEMENTER field of 0 means "reserved for software use";
    679         * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers
    680         * to see which features are present";
    681         * the VARIANT, PARTNUM and REVISION fields are all implementation
    682         * defined and we choose to define PARTNUM just in case guest
    683         * code needs to distinguish this QEMU CPU from other software
    684         * implementations, though this shouldn't be needed.
    685         */
    686        t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0);
    687        t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf);
    688        t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q');
    689        t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0);
    690        t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
    691        cpu->midr = t;
    692
    693        t = cpu->isar.id_aa64isar0;
    694        t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
    695        t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);
    696        t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* SHA512 */
    697        t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1);
    698        t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2);
    699        t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1);
    700        t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1);
    701        t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1);
    702        t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1);
    703        t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);
    704        t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1);
    705        t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* v8.5-CondM */
    706        t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */
    707        t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1);
    708        cpu->isar.id_aa64isar0 = t;
    709
    710        t = cpu->isar.id_aa64isar1;
    711        t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2);
    712        t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1);
    713        t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
    714        t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1);
    715        t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1);
    716        t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1);
    717        t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1);
    718        t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* ARMv8.4-RCPC */
    719        t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1);
    720        cpu->isar.id_aa64isar1 = t;
    721
    722        t = cpu->isar.id_aa64pfr0;
    723        t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
    724        t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);
    725        t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);
    726        t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1);
    727        t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1);
    728        cpu->isar.id_aa64pfr0 = t;
    729
    730        t = cpu->isar.id_aa64pfr1;
    731        t = FIELD_DP64(t, ID_AA64PFR1, BT, 1);
    732        t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2);
    733        /*
    734         * Begin with full support for MTE. This will be downgraded to MTE=0
    735         * during realize if the board provides no tag memory, much like
    736         * we do for EL2 with the virtualization=on property.
    737         */
    738        t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);
    739        cpu->isar.id_aa64pfr1 = t;
    740
    741        t = cpu->isar.id_aa64mmfr0;
    742        t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 5); /* PARange: 48 bits */
    743        cpu->isar.id_aa64mmfr0 = t;
    744
    745        t = cpu->isar.id_aa64mmfr1;
    746        t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
    747        t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
    748        t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);
    749        t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* ATS1E1 */
    750        t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* VMID16 */
    751        t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* TTS2UXN */
    752        cpu->isar.id_aa64mmfr1 = t;
    753
    754        t = cpu->isar.id_aa64mmfr2;
    755        t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1);
    756        t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* TTCNP */
    757        t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* TTST */
    758        cpu->isar.id_aa64mmfr2 = t;
    759
    760        t = cpu->isar.id_aa64zfr0;
    761        t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
    762        t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2);  /* PMULL */
    763        t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1);
    764        t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1);
    765        t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1);
    766        t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1);
    767        t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1);
    768        t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1);
    769        t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1);
    770        cpu->isar.id_aa64zfr0 = t;
    771
    772        /* Replicate the same data to the 32-bit id registers.  */
    773        u = cpu->isar.id_isar5;
    774        u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */
    775        u = FIELD_DP32(u, ID_ISAR5, SHA1, 1);
    776        u = FIELD_DP32(u, ID_ISAR5, SHA2, 1);
    777        u = FIELD_DP32(u, ID_ISAR5, CRC32, 1);
    778        u = FIELD_DP32(u, ID_ISAR5, RDM, 1);
    779        u = FIELD_DP32(u, ID_ISAR5, VCMA, 1);
    780        cpu->isar.id_isar5 = u;
    781
    782        u = cpu->isar.id_isar6;
    783        u = FIELD_DP32(u, ID_ISAR6, JSCVT, 1);
    784        u = FIELD_DP32(u, ID_ISAR6, DP, 1);
    785        u = FIELD_DP32(u, ID_ISAR6, FHM, 1);
    786        u = FIELD_DP32(u, ID_ISAR6, SB, 1);
    787        u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1);
    788        u = FIELD_DP32(u, ID_ISAR6, BF16, 1);
    789        u = FIELD_DP32(u, ID_ISAR6, I8MM, 1);
    790        cpu->isar.id_isar6 = u;
    791
    792        u = cpu->isar.id_pfr0;
    793        u = FIELD_DP32(u, ID_PFR0, DIT, 1);
    794        cpu->isar.id_pfr0 = u;
    795
    796        u = cpu->isar.id_pfr2;
    797        u = FIELD_DP32(u, ID_PFR2, SSBS, 1);
    798        cpu->isar.id_pfr2 = u;
    799
    800        u = cpu->isar.id_mmfr3;
    801        u = FIELD_DP32(u, ID_MMFR3, PAN, 2); /* ATS1E1 */
    802        cpu->isar.id_mmfr3 = u;
    803
    804        u = cpu->isar.id_mmfr4;
    805        u = FIELD_DP32(u, ID_MMFR4, HPDS, 1); /* AA32HPD */
    806        u = FIELD_DP32(u, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
    807        u = FIELD_DP32(u, ID_MMFR4, CNP, 1); /* TTCNP */
    808        u = FIELD_DP32(u, ID_MMFR4, XNX, 1); /* TTS2UXN */
    809        cpu->isar.id_mmfr4 = u;
    810
    811        t = cpu->isar.id_aa64dfr0;
    812        t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
    813        cpu->isar.id_aa64dfr0 = t;
    814
    815        u = cpu->isar.id_dfr0;
    816        u = FIELD_DP32(u, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
    817        cpu->isar.id_dfr0 = u;
    818
    819        u = cpu->isar.mvfr1;
    820        u = FIELD_DP32(u, MVFR1, FPHP, 3);      /* v8.2-FP16 */
    821        u = FIELD_DP32(u, MVFR1, SIMDHP, 2);    /* v8.2-FP16 */
    822        cpu->isar.mvfr1 = u;
    823
    824#ifdef CONFIG_USER_ONLY
    825        /* For usermode -cpu max we can use a larger and more efficient DCZ
    826         * blocksize since we don't have to follow what the hardware does.
    827         */
    828        cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
    829        cpu->dcz_blocksize = 7; /*  512 bytes */
    830#endif
    831
    832        /* Default to PAUTH on, with the architected algorithm. */
    833        qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_property);
    834        qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property);
    835
    836        bitmap_fill(cpu->sve_vq_supported, ARM_MAX_VQ);
    837    }
    838
    839    aarch64_add_sve_properties(obj);
    840    object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
    841                        cpu_max_set_sve_max_vq, NULL, NULL);
    842}
    843
/*
 * Model of the Fujitsu A64FX CPU. ID-register and cache-geometry values
 * are programmed verbatim below (MIDR implementer 0x46 is ASCII 'F',
 * presumably Fujitsu — confirm against the A64FX manual).
 */
static void aarch64_a64fx_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,a64fx";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->midr = 0x461f0010;
    cpu->revidr = 0x00000000;
    cpu->ctr = 0x86668006;
    cpu->reset_sctlr = 0x30000180;
    cpu->isar.id_aa64pfr0 =   0x0000000101111111; /* No RAS Extensions */
    cpu->isar.id_aa64pfr1 = 0x0000000000000000;
    cpu->isar.id_aa64dfr0 = 0x0000000010305408;
    cpu->isar.id_aa64dfr1 = 0x0000000000000000;
    cpu->id_aa64afr0 = 0x0000000000000000;
    cpu->id_aa64afr1 = 0x0000000000000000;
    cpu->isar.id_aa64mmfr0 = 0x0000000000001122;
    cpu->isar.id_aa64mmfr1 = 0x0000000011212100;
    cpu->isar.id_aa64mmfr2 = 0x0000000000001011;
    cpu->isar.id_aa64isar0 = 0x0000000010211120;
    cpu->isar.id_aa64isar1 = 0x0000000000010001;
    cpu->isar.id_aa64zfr0 = 0x0000000000000000;
    cpu->clidr = 0x0000000080000023;
    cpu->ccsidr[0] = 0x7007e01c; /* 64KB L1 dcache */
    cpu->ccsidr[1] = 0x2007e01c; /* 64KB L1 icache */
    cpu->ccsidr[2] = 0x70ffe07c; /* 8MB L2 cache */
    cpu->dcz_blocksize = 6; /* 256 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;

    /* The A64FX supports only the 128-, 256- and 512-bit SVE vector lengths. */
    aarch64_add_sve_properties(obj);
    bitmap_zero(cpu->sve_vq_supported, ARM_MAX_VQ);
    set_bit(0, cpu->sve_vq_supported); /* 128bit */
    set_bit(1, cpu->sve_vq_supported); /* 256bit */
    set_bit(3, cpu->sve_vq_supported); /* 512bit */

    /* TODO:  Add A64FX specific HPC extension registers */
}
    890
/*
 * CPU models provided by this file; each entry is turned into a QOM type
 * by aarch64_cpu_register_types() at module-init time.
 */
static const ARMCPUInfo aarch64_cpus[] = {
    { .name = "cortex-a57",         .initfn = aarch64_a57_initfn },
    { .name = "cortex-a53",         .initfn = aarch64_a53_initfn },
    { .name = "cortex-a72",         .initfn = aarch64_a72_initfn },
    { .name = "a64fx",              .initfn = aarch64_a64fx_initfn },
    { .name = "max",                .initfn = aarch64_max_initfn },
};
    898
    899static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
    900{
    901    ARMCPU *cpu = ARM_CPU(obj);
    902
    903    return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
    904}
    905
    906static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
    907{
    908    ARMCPU *cpu = ARM_CPU(obj);
    909
    910    /* At this time, this property is only allowed if KVM is enabled.  This
    911     * restriction allows us to avoid fixing up functionality that assumes a
    912     * uniform execution state like do_interrupt.
    913     */
    914    if (value == false) {
    915        if (!kvm_enabled() || !kvm_arm_aarch32_supported()) {
    916            error_setg(errp, "'aarch64' feature cannot be disabled "
    917                             "unless KVM is enabled and 32-bit EL1 "
    918                             "is supported");
    919            return;
    920        }
    921        unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
    922    } else {
    923        set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    924    }
    925}
    926
/* Instance finalizer: the AArch64 subclass holds no resources of its own. */
static void aarch64_cpu_finalizefn(Object *obj)
{
}
    930
/* Architecture name reported to the gdbstub; returns a freshly allocated copy. */
static gchar *aarch64_gdb_arch_name(CPUState *cs)
{
    return g_strdup("aarch64");
}
    935
/*
 * Class init for the abstract AArch64 base type: install the gdbstub
 * register accessors and register the class-level "aarch64" property.
 */
static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    /* 34 core registers as described by aarch64-core.xml. */
    cc->gdb_read_register = aarch64_cpu_gdb_read_register;
    cc->gdb_write_register = aarch64_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 34;
    cc->gdb_core_xml_file = "aarch64-core.xml";
    cc->gdb_arch_name = aarch64_gdb_arch_name;

    object_class_property_add_bool(oc, "aarch64", aarch64_cpu_get_aarch64,
                                   aarch64_cpu_set_aarch64);
    object_class_property_set_description(oc, "aarch64",
                                          "Set on/off to enable/disable aarch64 "
                                          "execution state ");
}
    952
    953static void aarch64_cpu_instance_init(Object *obj)
    954{
    955    ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);
    956
    957    acc->info->initfn(obj);
    958    arm_cpu_post_init(obj);
    959}
    960
/*
 * Default class_init for registered CPU models: stash the ARMCPUInfo
 * (passed as class_data) so instance init can find the model's initfn.
 */
static void cpu_register_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);

    acc->info = data;
}
    967
    968void aarch64_cpu_register(const ARMCPUInfo *info)
    969{
    970    TypeInfo type_info = {
    971        .parent = TYPE_AARCH64_CPU,
    972        .instance_size = sizeof(ARMCPU),
    973        .instance_init = aarch64_cpu_instance_init,
    974        .class_size = sizeof(ARMCPUClass),
    975        .class_init = info->class_init ?: cpu_register_class_init,
    976        .class_data = (void *)info,
    977    };
    978
    979    type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
    980    type_register(&type_info);
    981    g_free((void *)type_info.name);
    982}
    983
/*
 * Abstract QOM base type shared by all AArch64 CPU models; concrete
 * models are registered as subclasses via aarch64_cpu_register().
 */
static const TypeInfo aarch64_cpu_type_info = {
    .name = TYPE_AARCH64_CPU,
    .parent = TYPE_ARM_CPU,
    .instance_size = sizeof(ARMCPU),
    .instance_finalize = aarch64_cpu_finalizefn,
    .abstract = true,
    .class_size = sizeof(AArch64CPUClass),
    .class_init = aarch64_cpu_class_init,
};
    993
    994static void aarch64_cpu_register_types(void)
    995{
    996    size_t i;
    997
    998    type_register_static(&aarch64_cpu_type_info);
    999
   1000    for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) {
   1001        aarch64_cpu_register(&aarch64_cpus[i]);
   1002    }
   1003}
   1004
/* Hook the registration function into QEMU's module-init machinery. */
type_init(aarch64_cpu_register_types)