cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

helper.c (494710B)


/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/irq.h"
#include "semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "arm_ldst.h"
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"
#endif

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#define PMCR_NUM_COUNTERS 4 /* QEMU IMPDEF choice */

#ifndef CONFIG_USER_ONLY

static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               bool s1_is_el0,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));
#endif

static void switch_mode(CPUARMState *env, int mode);
static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
   /* Return true if the regdef would cause an assertion if you called
    * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
    * program bug for it not to have the NO_RAW flag).
    * NB that returning false here doesn't necessarily mean that calling
    * read/write_raw_cp_reg() is safe, because we can't distinguish "has
    * read/write access functions which are safe for raw use" from "has
    * read/write access functions which have side effects but has forgotten
    * to provide raw access functions".
    * The tests here line up with the conditions in read/write_raw_cp_reg()
    * and assertions in raw_read()/raw_write().
    */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

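/*
 * The two sync functions above move state between cpu->env and the flat
 * (index, value) list in both directions, relying on the fact that
 * constant (ARM_CP_CONST) registers silently ignore writes: a value
 * "took" only if it reads back unchanged. A minimal self-contained
 * sketch of that write-and-confirm idiom; the demo_* names are
 * hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>

typedef struct {
    uint64_t value;
    bool read_only;    /* stands in for ARM_CP_CONST: writes are ignored */
} DemoReg;

static void demo_reg_write(DemoReg *r, uint64_t v)
{
    if (!r->read_only) {
        r->value = v;
    }
}

static bool demo_reg_sync(DemoReg *r, uint64_t incoming)
{
    demo_reg_write(r, incoming);
    return r->value == incoming;    /* readback confirms the write took */
}
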
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

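/*
 * A minimal, self-contained sketch of the count-then-fill pattern
 * init_cpreg_list() uses: walk the sorted key list once to size the
 * arrays exactly, then walk it again to populate them. All demo_*
 * names are hypothetical; keys are assumed to point at guint32 IDs,
 * as in the cpreg code above.
 */
#include <glib.h>

typedef struct {
    guint64 *out;
    int len;
} DemoFill;

static void demo_count_key(gpointer key, gpointer opaque)
{
    int *count = opaque;

    (*count)++;
}

static void demo_add_key(gpointer key, gpointer opaque)
{
    DemoFill *fill = opaque;

    fill->out[fill->len++] = *(guint32 *)key;
}

static guint64 *demo_keys_to_array(GList *sorted_keys, int *lenp)
{
    DemoFill fill = { .out = NULL, .len = 0 };
    int count = 0;

    g_list_foreach(sorted_keys, demo_count_key, &count); /* pass 1: count */
    fill.out = g_new(guint64, count);                    /* exact-size alloc */
    g_list_foreach(sorted_keys, demo_add_key, &fill);    /* pass 2: fill */

    *lenp = count;
    return fill.out;
}
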
/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

static uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

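/*
 * The access_td*()/access_tpm() helpers above all share one shape: a
 * configured EL2 trap is checked before the EL3 trap, so when both the
 * hypervisor and the secure monitor request trapping, an access from
 * below EL2 routes to EL2. A minimal sketch of that priority rule, with
 * hypothetical demo_* names:
 */
#include <stdbool.h>

typedef enum {
    DEMO_ACCESS_OK,
    DEMO_TRAP_TO_EL2,
    DEMO_TRAP_TO_EL3,
} DemoAccessResult;

static DemoAccessResult demo_debug_trap(int current_el,
                                        bool el2_traps, bool el3_traps)
{
    if (current_el < 2 && el2_traps) {
        return DEMO_TRAP_TO_EL2;  /* EL2 wins for EL0/EL1 accesses */
    }
    if (current_el < 3 && el3_traps) {
        return DEMO_TRAP_TO_EL3;  /* EL3 catches everything below EL3 */
    }
    return DEMO_ACCESS_OK;
}
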
/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM.  */
static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW.  */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR.  */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_1_PAN |
                                        ARMMMUIdxBit_E10_0);
}


static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
        value |= env->cp15.cpacr_el1 & (0xf << 20);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
    }
    return value;
}


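/*
 * A worked instance of the NSACR.CP10 gating shared by cpacr_write()
 * and cpacr_read() above, with a hypothetical demo_* name: when EL3 is
 * AArch32 and NSACR.CP10 is clear, the non-secure view of the
 * CPACR.{CP11,CP10} field (bits [23:20]) reads as zero, and writes to
 * it are discarded in the same way.
 */
#include <stdint.h>
#include <stdbool.h>

static uint64_t demo_cpacr_ns_view(uint64_t cpacr, bool nsacr_cp10)
{
    return nsacr_cp10 ? cpacr : (cpacr & ~(0xfull << 20));
}
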
static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So we use the arm_cp_write_ignore() function instead of the
     * ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register: should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
    REGINFO_SENTINEL
};

typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value of this function.
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. In
 * usermode emulation there is no virtual clock, so fall back to the
 * host tick count instead.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                   ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

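/*
 * What the muldiv64() call above computes: cycles = ns * freq / 1e9,
 * with a wide intermediate product so large nanosecond values cannot
 * overflow. With ARM_CPU_FREQ fixed at 1 GHz the conversion is the
 * identity. A self-contained sketch (hypothetical demo_* name, using
 * the GCC/Clang __uint128_t extension for the wide multiply):
 */
#include <stdint.h>

static uint64_t demo_ns_to_cycles(uint64_t ns, uint64_t freq_hz)
{
    return (uint64_t)(((__uint128_t)ns * freq_hz) / 1000000000ull);
}
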
#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return icount_enabled() == 1; /* Precise instruction counting */
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return icount_to_ns((int64_t)icount);
}
#endif

static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}

static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* Used for events which never fire on QEMU, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmu_8_4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}

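/*
 * The PMCEID bit layout pmu_init() fills in, spelled out: bit 5 of the
 * event number selects the register (PMCEID0 for 0x00..0x1f, PMCEID1
 * for 0x20..0x3f) and the low five bits select the bit position, so
 * e.g. STALL_BACKEND (0x024) sets bit 4 of PMCEID1. A sketch with
 * hypothetical demo_* names:
 */
#include <stdint.h>
#include <stdbool.h>

static void demo_pmceid_bit(uint16_t number, bool *in_pmceid1, unsigned *bit)
{
    *in_pmceid1 = number & 0x20;    /* events 0x20..0x3f live in PMCEID1 */
    *bit = number & 0x1f;           /* bit position within that register */
}
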
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* User accessibility of the performance monitor registers is
     * controlled by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow
     * configurable trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    uint8_t hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
           !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
              arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is set up to count
         * an event we support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

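/*
 * A worked instance of the EL0 filter rule above, with a hypothetical
 * demo_* name: in non-secure EL0 the event counts only when
 * PMXEVTYPER.U and PMXEVTYPER.NSU agree, while in secure EL0 it counts
 * unless U is set.
 */
#include <stdbool.h>

static bool demo_el0_filtered(bool secure, bool u, bool nsu)
{
    return secure ? u : (u != nsu);
}
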
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

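/*
 * A minimal sketch of the delta scheme pmccntr_op_start()/_op_finish()
 * implement (hypothetical demo_* names): while the counter is "live",
 * only the difference between the free-running clock and the guest
 * value is stored; start() materialises the guest-visible value so it
 * can be read or modified, and finish() folds any modification back
 * into the stored delta.
 */
#include <stdint.h>

typedef struct {
    uint64_t guest;    /* guest-visible count, valid after demo_start() */
    uint64_t delta;    /* clock - guest, valid after demo_finish() */
} DemoCounter;

static void demo_start(DemoCounter *c, uint64_t clock)
{
    c->guest = clock - c->delta;    /* materialise the guest value */
}

static void demo_finish(DemoCounter *c, uint64_t clock)
{
    c->delta = clock - c->guest;    /* re-derive the delta from edits */
}
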
   1315static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
   1316{
   1317
   1318    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
   1319    uint64_t count = 0;
   1320    if (event_supported(event)) {
   1321        uint16_t event_idx = supported_event_map[event];
   1322        count = pm_events[event_idx].get_count(env);
   1323    }
   1324
   1325    if (pmu_counter_enabled(env, counter)) {
   1326        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
   1327
   1328        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
   1329            env->cp15.c9_pmovsr |= (1 << counter);
   1330            pmu_update_irq(env);
   1331        }
   1332        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
   1333    }
   1334    env->cp15.c14_pmevcntr_delta[counter] = count;
   1335}
   1336
   1337static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
   1338{
   1339    if (pmu_counter_enabled(env, counter)) {
   1340#ifndef CONFIG_USER_ONLY
   1341        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
   1342        uint16_t event_idx = supported_event_map[event];
   1343        uint64_t delta = UINT32_MAX -
   1344            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
   1345        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
   1346
   1347        if (overflow_in > 0) {
   1348            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
   1349                overflow_in;
   1350            ARMCPU *cpu = env_archcpu(env);
   1351            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
   1352        }
   1353#endif
   1354
   1355        env->cp15.c14_pmevcntr_delta[counter] -=
   1356            env->cp15.c14_pmevcntr[counter];
   1357    }
   1358}
   1359
   1360void pmu_op_start(CPUARMState *env)
   1361{
   1362    unsigned int i;
   1363    pmccntr_op_start(env);
   1364    for (i = 0; i < pmu_num_counters(env); i++) {
   1365        pmevcntr_op_start(env, i);
   1366    }
   1367}
   1368
   1369void pmu_op_finish(CPUARMState *env)
   1370{
   1371    unsigned int i;
   1372    pmccntr_op_finish(env);
   1373    for (i = 0; i < pmu_num_counters(env); i++) {
   1374        pmevcntr_op_finish(env, i);
   1375    }
   1376}
   1377
   1378void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
   1379{
   1380    pmu_op_start(&cpu->env);
   1381}
   1382
   1383void pmu_post_el_change(ARMCPU *cpu, void *ignored)
   1384{
   1385    pmu_op_finish(&cpu->env);
   1386}
   1387
   1388void arm_pmu_timer_cb(void *opaque)
   1389{
   1390    ARMCPU *cpu = opaque;
   1391
   1392    /*
   1393     * Update all the counter values based on the current underlying counts,
   1394     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
   1395     * has the effect of setting the cpu->pmu_timer to the next earliest time a
   1396     * counter may expire.
   1397     */
   1398    pmu_op_start(&cpu->env);
   1399    pmu_op_finish(&cpu->env);
   1400}
   1401
   1402static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1403                       uint64_t value)
   1404{
   1405    pmu_op_start(env);
   1406
   1407    if (value & PMCRC) {
   1408        /* The counter has been reset */
   1409        env->cp15.c15_ccnt = 0;
   1410    }
   1411
   1412    if (value & PMCRP) {
   1413        unsigned int i;
   1414        for (i = 0; i < pmu_num_counters(env); i++) {
   1415            env->cp15.c14_pmevcntr[i] = 0;
   1416        }
   1417    }
   1418
   1419    env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
   1420    env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);
   1421
   1422    pmu_op_finish(env);
   1423}
   1424
   1425static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1426                          uint64_t value)
   1427{
   1428    unsigned int i;
   1429    for (i = 0; i < pmu_num_counters(env); i++) {
   1430        /* Increment a counter's count iff: */
   1431        if ((value & (1 << i)) && /* counter's bit is set */
   1432                /* counter is enabled and not filtered */
   1433                pmu_counter_enabled(env, i) &&
   1434                /* counter is SW_INCR */
   1435                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
   1436            pmevcntr_op_start(env, i);
   1437
   1438            /*
   1439             * Detect if this write causes an overflow since we can't predict
   1440             * PMSWINC overflows like we can for other events
   1441             */
   1442            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
   1443
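                    /*
                     * The counter overflows iff bit 31 went from 1 to 0, i.e.
                     * the 32-bit view wrapped from 0xFFFFFFFF to 0 on this
                     * increment.
                     */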
   1444            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
   1445                env->cp15.c9_pmovsr |= (1 << i);
   1446                pmu_update_irq(env);
   1447            }
   1448
   1449            env->cp15.c14_pmevcntr[i] = new_pmswinc;
   1450
   1451            pmevcntr_op_finish(env, i);
   1452        }
   1453    }
   1454}
   1455
   1456static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1457{
   1458    uint64_t ret;
   1459    pmccntr_op_start(env);
   1460    ret = env->cp15.c15_ccnt;
   1461    pmccntr_op_finish(env);
   1462    return ret;
   1463}
   1464
   1465static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1466                         uint64_t value)
   1467{
   1468    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
    1469     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; the
    1470     * value of PMSELR.SEL is then checked when PMXEVTYPER and PMXEVCNTR
    1471     * are actually accessed.
   1472     */
   1473    env->cp15.c9_pmselr = value & 0x1f;
   1474}
   1475
   1476static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1477                        uint64_t value)
   1478{
   1479    pmccntr_op_start(env);
   1480    env->cp15.c15_ccnt = value;
   1481    pmccntr_op_finish(env);
   1482}
   1483
   1484static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
   1485                            uint64_t value)
   1486{
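            /*
             * AArch32 PMCCNTR writes only the low word: deposit the 32-bit
             * value into the bottom of the 64-bit cycle counter, preserving
             * bits [63:32].
             */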
   1487    uint64_t cur_val = pmccntr_read(env, NULL);
   1488
   1489    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
   1490}
   1491
   1492static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1493                            uint64_t value)
   1494{
   1495    pmccntr_op_start(env);
   1496    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
   1497    pmccntr_op_finish(env);
   1498}
   1499
   1500static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
   1501                            uint64_t value)
   1502{
   1503    pmccntr_op_start(env);
   1504    /* M is not accessible from AArch32 */
   1505    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
   1506        (value & PMCCFILTR);
   1507    pmccntr_op_finish(env);
   1508}
   1509
   1510static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
   1511{
   1512    /* M is not visible in AArch32 */
   1513    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
   1514}
   1515
   1516static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1517                            uint64_t value)
   1518{
   1519    value &= pmu_counter_mask(env);
   1520    env->cp15.c9_pmcnten |= value;
   1521}
   1522
   1523static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1524                             uint64_t value)
   1525{
   1526    value &= pmu_counter_mask(env);
   1527    env->cp15.c9_pmcnten &= ~value;
   1528}
   1529
   1530static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1531                         uint64_t value)
   1532{
   1533    value &= pmu_counter_mask(env);
   1534    env->cp15.c9_pmovsr &= ~value;
   1535    pmu_update_irq(env);
   1536}
   1537
   1538static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1539                         uint64_t value)
   1540{
   1541    value &= pmu_counter_mask(env);
   1542    env->cp15.c9_pmovsr |= value;
   1543    pmu_update_irq(env);
   1544}
   1545
   1546static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1547                             uint64_t value, const uint8_t counter)
   1548{
   1549    if (counter == 31) {
   1550        pmccfiltr_write(env, ri, value);
   1551    } else if (counter < pmu_num_counters(env)) {
   1552        pmevcntr_op_start(env, counter);
   1553
   1554        /*
   1555         * If this counter's event type is changing, store the current
   1556         * underlying count for the new type in c14_pmevcntr_delta[counter] so
   1557         * pmevcntr_op_finish has the correct baseline when it converts back to
   1558         * a delta.
   1559         */
   1560        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
   1561            PMXEVTYPER_EVTCOUNT;
   1562        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
   1563        if (old_event != new_event) {
   1564            uint64_t count = 0;
   1565            if (event_supported(new_event)) {
   1566                uint16_t event_idx = supported_event_map[new_event];
   1567                count = pm_events[event_idx].get_count(env);
   1568            }
   1569            env->cp15.c14_pmevcntr_delta[counter] = count;
   1570        }
   1571
   1572        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
   1573        pmevcntr_op_finish(env, counter);
   1574    }
   1575    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
   1576     * PMSELR value is equal to or greater than the number of implemented
   1577     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
   1578     */
   1579}
   1580
   1581static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
   1582                               const uint8_t counter)
   1583{
   1584    if (counter == 31) {
   1585        return env->cp15.pmccfiltr_el0;
   1586    } else if (counter < pmu_num_counters(env)) {
   1587        return env->cp15.c14_pmevtyper[counter];
   1588    } else {
    1589        /*
    1590         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
    1591         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
    1592         */
   1593        return 0;
   1594    }
   1595}
   1596
   1597static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
   1598                              uint64_t value)
   1599{
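            /* PMEVTYPER<n>: n is encoded in CRm[1:0]:opc2[2:0], giving 0..31 */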
   1600    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
   1601    pmevtyper_write(env, ri, value, counter);
   1602}
   1603
   1604static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
   1605                               uint64_t value)
   1606{
   1607    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
   1608    env->cp15.c14_pmevtyper[counter] = value;
   1609
   1610    /*
   1611     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
   1612     * pmu_op_finish calls when loading saved state for a migration. Because
   1613     * we're potentially updating the type of event here, the value written to
    1614     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
   1615     * different counter type. Therefore, we need to set this value to the
   1616     * current count for the counter type we're writing so that pmu_op_finish
   1617     * has the correct count for its calculation.
   1618     */
   1619    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
   1620    if (event_supported(event)) {
   1621        uint16_t event_idx = supported_event_map[event];
   1622        env->cp15.c14_pmevcntr_delta[counter] =
   1623            pm_events[event_idx].get_count(env);
   1624    }
   1625}
   1626
   1627static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
   1628{
   1629    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
   1630    return pmevtyper_read(env, ri, counter);
   1631}
   1632
   1633static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1634                             uint64_t value)
   1635{
   1636    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
   1637}
   1638
   1639static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1640{
   1641    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
   1642}
   1643
   1644static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1645                             uint64_t value, uint8_t counter)
   1646{
   1647    if (counter < pmu_num_counters(env)) {
   1648        pmevcntr_op_start(env, counter);
   1649        env->cp15.c14_pmevcntr[counter] = value;
   1650        pmevcntr_op_finish(env, counter);
   1651    }
   1652    /*
   1653     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
   1654     * are CONSTRAINED UNPREDICTABLE.
   1655     */
   1656}
   1657
   1658static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
   1659                              uint8_t counter)
   1660{
   1661    if (counter < pmu_num_counters(env)) {
   1662        uint64_t ret;
   1663        pmevcntr_op_start(env, counter);
   1664        ret = env->cp15.c14_pmevcntr[counter];
   1665        pmevcntr_op_finish(env, counter);
   1666        return ret;
   1667    } else {
    1668        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
    1669         * are CONSTRAINED UNPREDICTABLE. */
   1670        return 0;
   1671    }
   1672}
   1673
   1674static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
   1675                             uint64_t value)
   1676{
   1677    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
   1678    pmevcntr_write(env, ri, value, counter);
   1679}
   1680
   1681static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
   1682{
   1683    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
   1684    return pmevcntr_read(env, ri, counter);
   1685}
   1686
   1687static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
   1688                             uint64_t value)
   1689{
   1690    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
   1691    assert(counter < pmu_num_counters(env));
   1692    env->cp15.c14_pmevcntr[counter] = value;
   1693    pmevcntr_write(env, ri, value, counter);
   1694}
   1695
   1696static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
   1697{
   1698    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
   1699    assert(counter < pmu_num_counters(env));
   1700    return env->cp15.c14_pmevcntr[counter];
   1701}
   1702
   1703static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1704                             uint64_t value)
   1705{
   1706    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
   1707}
   1708
   1709static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1710{
   1711    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
   1712}
   1713
   1714static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1715                            uint64_t value)
   1716{
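            /* In v8, PMUSERENR has the ER, CR, SW and EN bits; pre-v8 only EN */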
   1717    if (arm_feature(env, ARM_FEATURE_V8)) {
   1718        env->cp15.c9_pmuserenr = value & 0xf;
   1719    } else {
   1720        env->cp15.c9_pmuserenr = value & 1;
   1721    }
   1722}
   1723
   1724static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1725                             uint64_t value)
   1726{
    1727    /* Only bits for implemented counters, plus the C bit, can be changed */
   1728    value &= pmu_counter_mask(env);
   1729    env->cp15.c9_pminten |= value;
   1730    pmu_update_irq(env);
   1731}
   1732
   1733static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1734                             uint64_t value)
   1735{
   1736    value &= pmu_counter_mask(env);
   1737    env->cp15.c9_pminten &= ~value;
   1738    pmu_update_irq(env);
   1739}
   1740
   1741static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1742                       uint64_t value)
   1743{
   1744    /* Note that even though the AArch64 view of this register has bits
   1745     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
   1746     * architectural requirements for bits which are RES0 only in some
   1747     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
   1748     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
   1749     */
   1750    raw_write(env, ri, value & ~0x1FULL);
   1751}
   1752
   1753static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
   1754{
   1755    /* Begin with base v8.0 state.  */
   1756    uint32_t valid_mask = 0x3fff;
   1757    ARMCPU *cpu = env_archcpu(env);
   1758
   1759    if (ri->state == ARM_CP_STATE_AA64) {
   1760        if (arm_feature(env, ARM_FEATURE_AARCH64) &&
   1761            !cpu_isar_feature(aa64_aa32_el1, cpu)) {
   1762                value |= SCR_FW | SCR_AW;   /* these two bits are RES1.  */
   1763        }
   1764        valid_mask &= ~SCR_NET;
   1765
   1766        if (cpu_isar_feature(aa64_lor, cpu)) {
   1767            valid_mask |= SCR_TLOR;
   1768        }
   1769        if (cpu_isar_feature(aa64_pauth, cpu)) {
   1770            valid_mask |= SCR_API | SCR_APK;
   1771        }
   1772        if (cpu_isar_feature(aa64_sel2, cpu)) {
   1773            valid_mask |= SCR_EEL2;
   1774        }
   1775        if (cpu_isar_feature(aa64_mte, cpu)) {
   1776            valid_mask |= SCR_ATA;
   1777        }
   1778    } else {
   1779        valid_mask &= ~(SCR_RW | SCR_ST);
   1780    }
   1781
   1782    if (!arm_feature(env, ARM_FEATURE_EL2)) {
   1783        valid_mask &= ~SCR_HCE;
   1784
    1785        /* On ARMv7, SMD (or SCD as it is called in v7) is only
    1786         * supported if EL2 exists. The bit is UNK/SBZP when
    1787         * EL2 is unavailable, and QEMU's ARMv7 implementation forces
    1788         * it to zero in that case.
    1789         * On ARMv8, this bit is always available.
    1790         */
   1791        if (arm_feature(env, ARM_FEATURE_V7) &&
   1792            !arm_feature(env, ARM_FEATURE_V8)) {
   1793            valid_mask &= ~SCR_SMD;
   1794        }
   1795    }
   1796
   1797    /* Clear all-context RES0 bits.  */
   1798    value &= valid_mask;
   1799    raw_write(env, ri, value);
   1800}
   1801
   1802static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
   1803{
   1804    /*
   1805     * scr_write will set the RES1 bits on an AArch64-only CPU.
   1806     * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
   1807     */
   1808    scr_write(env, ri, 0);
   1809}
   1810
   1811static CPAccessResult access_aa64_tid2(CPUARMState *env,
   1812                                       const ARMCPRegInfo *ri,
   1813                                       bool isread)
   1814{
   1815    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
   1816        return CP_ACCESS_TRAP_EL2;
   1817    }
   1818
   1819    return CP_ACCESS_OK;
   1820}
   1821
   1822static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1823{
   1824    ARMCPU *cpu = env_archcpu(env);
   1825
    1826    /* Acquire the CSSELR index from the register bank that corresponds
    1827     * to this CCSIDR access (secure or non-secure)
    1828     */
   1829    uint32_t index = A32_BANKED_REG_GET(env, csselr,
   1830                                        ri->secure & ARM_CP_SECSTATE_S);
   1831
   1832    return cpu->ccsidr[index];
   1833}
   1834
   1835static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1836                         uint64_t value)
   1837{
   1838    raw_write(env, ri, value & 0xf);
   1839}
   1840
   1841static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1842{
   1843    CPUState *cs = env_cpu(env);
   1844    bool el1 = arm_current_el(env) == 1;
   1845    uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
   1846    uint64_t ret = 0;
   1847
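            /*
             * With HCR_EL2.IMO set, physical IRQs are routed to EL2, so the
             * I bit reflects the virtual IRQ line; HCR_EL2.FMO below does
             * the same for FIQs.
             */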
   1848    if (hcr_el2 & HCR_IMO) {
   1849        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
   1850            ret |= CPSR_I;
   1851        }
   1852    } else {
   1853        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
   1854            ret |= CPSR_I;
   1855        }
   1856    }
   1857
   1858    if (hcr_el2 & HCR_FMO) {
   1859        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
   1860            ret |= CPSR_F;
   1861        }
   1862    } else {
   1863        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
   1864            ret |= CPSR_F;
   1865        }
   1866    }
   1867
   1868    /* External aborts are not possible in QEMU so A bit is always clear */
   1869    return ret;
   1870}
   1871
   1872static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
   1873                                       bool isread)
   1874{
   1875    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
   1876        return CP_ACCESS_TRAP_EL2;
   1877    }
   1878
   1879    return CP_ACCESS_OK;
   1880}
   1881
   1882static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
   1883                                       bool isread)
   1884{
   1885    if (arm_feature(env, ARM_FEATURE_V8)) {
   1886        return access_aa64_tid1(env, ri, isread);
   1887    }
   1888
   1889    return CP_ACCESS_OK;
   1890}
   1891
   1892static const ARMCPRegInfo v7_cp_reginfo[] = {
   1893    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
   1894    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
   1895      .access = PL1_W, .type = ARM_CP_NOP },
   1896    /* Performance monitors are implementation defined in v7,
   1897     * but with an ARM recommended set of registers, which we
   1898     * follow.
   1899     *
   1900     * Performance registers fall into three categories:
   1901     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
   1902     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
   1903     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
   1904     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
   1905     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
   1906     */
   1907    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
   1908      .access = PL0_RW, .type = ARM_CP_ALIAS,
   1909      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
   1910      .writefn = pmcntenset_write,
   1911      .accessfn = pmreg_access,
   1912      .raw_writefn = raw_write },
   1913    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
   1914      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
   1915      .access = PL0_RW, .accessfn = pmreg_access,
   1916      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
   1917      .writefn = pmcntenset_write, .raw_writefn = raw_write },
   1918    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
   1919      .access = PL0_RW,
   1920      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
   1921      .accessfn = pmreg_access,
   1922      .writefn = pmcntenclr_write,
   1923      .type = ARM_CP_ALIAS },
   1924    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
   1925      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
   1926      .access = PL0_RW, .accessfn = pmreg_access,
   1927      .type = ARM_CP_ALIAS,
   1928      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
   1929      .writefn = pmcntenclr_write },
   1930    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
   1931      .access = PL0_RW, .type = ARM_CP_IO,
   1932      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
   1933      .accessfn = pmreg_access,
   1934      .writefn = pmovsr_write,
   1935      .raw_writefn = raw_write },
   1936    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
   1937      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
   1938      .access = PL0_RW, .accessfn = pmreg_access,
   1939      .type = ARM_CP_ALIAS | ARM_CP_IO,
   1940      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
   1941      .writefn = pmovsr_write,
   1942      .raw_writefn = raw_write },
   1943    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
   1944      .access = PL0_W, .accessfn = pmreg_access_swinc,
   1945      .type = ARM_CP_NO_RAW | ARM_CP_IO,
   1946      .writefn = pmswinc_write },
   1947    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
   1948      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
   1949      .access = PL0_W, .accessfn = pmreg_access_swinc,
   1950      .type = ARM_CP_NO_RAW | ARM_CP_IO,
   1951      .writefn = pmswinc_write },
   1952    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
   1953      .access = PL0_RW, .type = ARM_CP_ALIAS,
   1954      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
   1955      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
   1956      .raw_writefn = raw_write},
   1957    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
   1958      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
   1959      .access = PL0_RW, .accessfn = pmreg_access_selr,
   1960      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
   1961      .writefn = pmselr_write, .raw_writefn = raw_write, },
   1962    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
   1963      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
   1964      .readfn = pmccntr_read, .writefn = pmccntr_write32,
   1965      .accessfn = pmreg_access_ccntr },
   1966    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
   1967      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
   1968      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
   1969      .type = ARM_CP_IO,
   1970      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
   1971      .readfn = pmccntr_read, .writefn = pmccntr_write,
   1972      .raw_readfn = raw_read, .raw_writefn = raw_write, },
   1973    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
   1974      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
   1975      .access = PL0_RW, .accessfn = pmreg_access,
   1976      .type = ARM_CP_ALIAS | ARM_CP_IO,
   1977      .resetvalue = 0, },
   1978    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
   1979      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
   1980      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
   1981      .access = PL0_RW, .accessfn = pmreg_access,
   1982      .type = ARM_CP_IO,
   1983      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
   1984      .resetvalue = 0, },
   1985    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
   1986      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
   1987      .accessfn = pmreg_access,
   1988      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
   1989    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
   1990      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
   1991      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
   1992      .accessfn = pmreg_access,
   1993      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
   1994    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
   1995      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
   1996      .accessfn = pmreg_access_xevcntr,
   1997      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
   1998    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
   1999      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
   2000      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
   2001      .accessfn = pmreg_access_xevcntr,
   2002      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
   2003    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
   2004      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
   2005      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
   2006      .resetvalue = 0,
   2007      .writefn = pmuserenr_write, .raw_writefn = raw_write },
   2008    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
   2009      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
   2010      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
   2011      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
   2012      .resetvalue = 0,
   2013      .writefn = pmuserenr_write, .raw_writefn = raw_write },
   2014    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
   2015      .access = PL1_RW, .accessfn = access_tpm,
   2016      .type = ARM_CP_ALIAS | ARM_CP_IO,
   2017      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
   2018      .resetvalue = 0,
   2019      .writefn = pmintenset_write, .raw_writefn = raw_write },
   2020    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
   2021      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
   2022      .access = PL1_RW, .accessfn = access_tpm,
   2023      .type = ARM_CP_IO,
   2024      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
   2025      .writefn = pmintenset_write, .raw_writefn = raw_write,
   2026      .resetvalue = 0x0 },
   2027    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
   2028      .access = PL1_RW, .accessfn = access_tpm,
   2029      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
   2030      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
   2031      .writefn = pmintenclr_write, },
   2032    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
   2033      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
   2034      .access = PL1_RW, .accessfn = access_tpm,
   2035      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
   2036      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
   2037      .writefn = pmintenclr_write },
   2038    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
   2039      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
   2040      .access = PL1_R,
   2041      .accessfn = access_aa64_tid2,
   2042      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
   2043    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
   2044      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
   2045      .access = PL1_RW,
   2046      .accessfn = access_aa64_tid2,
   2047      .writefn = csselr_write, .resetvalue = 0,
   2048      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
   2049                             offsetof(CPUARMState, cp15.csselr_ns) } },
   2050    /* Auxiliary ID register: this actually has an IMPDEF value but for now
   2051     * just RAZ for all cores:
   2052     */
   2053    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
   2054      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
   2055      .access = PL1_R, .type = ARM_CP_CONST,
   2056      .accessfn = access_aa64_tid1,
   2057      .resetvalue = 0 },
   2058    /* Auxiliary fault status registers: these also are IMPDEF, and we
   2059     * choose to RAZ/WI for all cores.
   2060     */
   2061    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
   2062      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
   2063      .access = PL1_RW, .accessfn = access_tvm_trvm,
   2064      .type = ARM_CP_CONST, .resetvalue = 0 },
   2065    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
   2066      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
   2067      .access = PL1_RW, .accessfn = access_tvm_trvm,
   2068      .type = ARM_CP_CONST, .resetvalue = 0 },
   2069    /* MAIR can just read-as-written because we don't implement caches
   2070     * and so don't need to care about memory attributes.
   2071     */
   2072    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
   2073      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
   2074      .access = PL1_RW, .accessfn = access_tvm_trvm,
   2075      .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
   2076      .resetvalue = 0 },
   2077    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
   2078      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
   2079      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
   2080      .resetvalue = 0 },
   2081    /* For non-long-descriptor page tables these are PRRR and NMRR;
   2082     * regardless they still act as reads-as-written for QEMU.
   2083     */
   2084     /* MAIR0/1 are defined separately from their 64-bit counterpart which
   2085      * allows them to assign the correct fieldoffset based on the endianness
   2086      * handled in the field definitions.
   2087      */
   2088    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
   2089      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
   2090      .access = PL1_RW, .accessfn = access_tvm_trvm,
   2091      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
   2092                             offsetof(CPUARMState, cp15.mair0_ns) },
   2093      .resetfn = arm_cp_reset_ignore },
   2094    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
   2095      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
   2096      .access = PL1_RW, .accessfn = access_tvm_trvm,
   2097      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
   2098                             offsetof(CPUARMState, cp15.mair1_ns) },
   2099      .resetfn = arm_cp_reset_ignore },
   2100    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
   2101      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
   2102      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
   2103    /* 32 bit ITLB invalidates */
   2104    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
   2105      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2106      .writefn = tlbiall_write },
   2107    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
   2108      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2109      .writefn = tlbimva_write },
   2110    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
   2111      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2112      .writefn = tlbiasid_write },
   2113    /* 32 bit DTLB invalidates */
   2114    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
   2115      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2116      .writefn = tlbiall_write },
   2117    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
   2118      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2119      .writefn = tlbimva_write },
   2120    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
   2121      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2122      .writefn = tlbiasid_write },
   2123    /* 32 bit TLB invalidates */
   2124    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
   2125      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2126      .writefn = tlbiall_write },
   2127    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
   2128      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2129      .writefn = tlbimva_write },
   2130    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
   2131      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2132      .writefn = tlbiasid_write },
   2133    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
   2134      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2135      .writefn = tlbimvaa_write },
   2136    REGINFO_SENTINEL
   2137};
   2138
   2139static const ARMCPRegInfo v7mp_cp_reginfo[] = {
   2140    /* 32 bit TLB invalidates, Inner Shareable */
   2141    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
   2142      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2143      .writefn = tlbiall_is_write },
   2144    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
   2145      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2146      .writefn = tlbimva_is_write },
   2147    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
   2148      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2149      .writefn = tlbiasid_is_write },
   2150    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
   2151      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2152      .writefn = tlbimvaa_is_write },
   2153    REGINFO_SENTINEL
   2154};
   2155
   2156static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
   2157    /* PMOVSSET is not implemented in v7 before v7ve */
   2158    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
   2159      .access = PL0_RW, .accessfn = pmreg_access,
   2160      .type = ARM_CP_ALIAS | ARM_CP_IO,
   2161      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
   2162      .writefn = pmovsset_write,
   2163      .raw_writefn = raw_write },
   2164    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
   2165      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
   2166      .access = PL0_RW, .accessfn = pmreg_access,
   2167      .type = ARM_CP_ALIAS | ARM_CP_IO,
   2168      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
   2169      .writefn = pmovsset_write,
   2170      .raw_writefn = raw_write },
   2171    REGINFO_SENTINEL
   2172};
   2173
   2174static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2175                        uint64_t value)
   2176{
   2177    value &= 1;
   2178    env->teecr = value;
   2179}
   2180
   2181static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
   2182                                   bool isread)
   2183{
   2184    /*
   2185     * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
   2186     * at all, so we don't need to check whether we're v8A.
   2187     */
   2188    if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
   2189        (env->cp15.hstr_el2 & HSTR_TTEE)) {
   2190        return CP_ACCESS_TRAP_EL2;
   2191    }
   2192    return CP_ACCESS_OK;
   2193}
   2194
   2195static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
   2196                                    bool isread)
   2197{
   2198    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
   2199        return CP_ACCESS_TRAP;
   2200    }
   2201    return teecr_access(env, ri, isread);
   2202}
   2203
   2204static const ARMCPRegInfo t2ee_cp_reginfo[] = {
   2205    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
   2206      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
   2207      .resetvalue = 0,
   2208      .writefn = teecr_write, .accessfn = teecr_access },
   2209    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
   2210      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
   2211      .accessfn = teehbr_access, .resetvalue = 0 },
   2212    REGINFO_SENTINEL
   2213};
   2214
   2215static const ARMCPRegInfo v6k_cp_reginfo[] = {
   2216    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
   2217      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
   2218      .access = PL0_RW,
   2219      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
   2220    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
   2221      .access = PL0_RW,
   2222      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
   2223                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
   2224      .resetfn = arm_cp_reset_ignore },
   2225    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
   2226      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
   2227      .access = PL0_R|PL1_W,
   2228      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
   2229      .resetvalue = 0},
   2230    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
   2231      .access = PL0_R|PL1_W,
   2232      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
   2233                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
   2234      .resetfn = arm_cp_reset_ignore },
   2235    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
   2236      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
   2237      .access = PL1_RW,
   2238      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
   2239    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
   2240      .access = PL1_RW,
   2241      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
   2242                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
   2243      .resetvalue = 0 },
   2244    REGINFO_SENTINEL
   2245};
   2246
   2247#ifndef CONFIG_USER_ONLY
   2248
   2249static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
   2250                                       bool isread)
   2251{
   2252    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
   2253     * Writable only at the highest implemented exception level.
   2254     */
   2255    int el = arm_current_el(env);
   2256    uint64_t hcr;
   2257    uint32_t cntkctl;
   2258
   2259    switch (el) {
   2260    case 0:
   2261        hcr = arm_hcr_el2_eff(env);
   2262        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
   2263            cntkctl = env->cp15.cnthctl_el2;
   2264        } else {
   2265            cntkctl = env->cp15.c14_cntkctl;
   2266        }
   2267        if (!extract32(cntkctl, 0, 2)) {
   2268            return CP_ACCESS_TRAP;
   2269        }
   2270        break;
   2271    case 1:
   2272        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
   2273            arm_is_secure_below_el3(env)) {
   2274            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
   2275            return CP_ACCESS_TRAP_UNCATEGORIZED;
   2276        }
   2277        break;
   2278    case 2:
   2279    case 3:
   2280        break;
   2281    }
   2282
   2283    if (!isread && el < arm_highest_el(env)) {
   2284        return CP_ACCESS_TRAP_UNCATEGORIZED;
   2285    }
   2286
   2287    return CP_ACCESS_OK;
   2288}
   2289
   2290static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
   2291                                        bool isread)
   2292{
   2293    unsigned int cur_el = arm_current_el(env);
   2294    bool has_el2 = arm_is_el2_enabled(env);
   2295    uint64_t hcr = arm_hcr_el2_eff(env);
   2296
   2297    switch (cur_el) {
   2298    case 0:
   2299        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
   2300        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
   2301            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
   2302                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
   2303        }
   2304
   2305        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
   2306        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
   2307            return CP_ACCESS_TRAP;
   2308        }
   2309
   2310        /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
   2311        if (hcr & HCR_E2H) {
   2312            if (timeridx == GTIMER_PHYS &&
   2313                !extract32(env->cp15.cnthctl_el2, 10, 1)) {
   2314                return CP_ACCESS_TRAP_EL2;
   2315            }
   2316        } else {
   2317            /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
   2318            if (has_el2 && timeridx == GTIMER_PHYS &&
   2319                !extract32(env->cp15.cnthctl_el2, 1, 1)) {
   2320                return CP_ACCESS_TRAP_EL2;
   2321            }
   2322        }
   2323        break;
   2324
   2325    case 1:
   2326        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
   2327        if (has_el2 && timeridx == GTIMER_PHYS &&
   2328            (hcr & HCR_E2H
   2329             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
   2330             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
   2331            return CP_ACCESS_TRAP_EL2;
   2332        }
   2333        break;
   2334    }
   2335    return CP_ACCESS_OK;
   2336}
   2337
   2338static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
   2339                                      bool isread)
   2340{
   2341    unsigned int cur_el = arm_current_el(env);
   2342    bool has_el2 = arm_is_el2_enabled(env);
   2343    uint64_t hcr = arm_hcr_el2_eff(env);
   2344
   2345    switch (cur_el) {
   2346    case 0:
   2347        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
   2348            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
   2349            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
   2350                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
   2351        }
   2352
   2353        /*
   2354         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
   2355         * EL0 if EL0[PV]TEN is zero.
   2356         */
   2357        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
   2358            return CP_ACCESS_TRAP;
   2359        }
   2360        /* fall through */
   2361
   2362    case 1:
   2363        if (has_el2 && timeridx == GTIMER_PHYS) {
   2364            if (hcr & HCR_E2H) {
   2365                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
   2366                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
   2367                    return CP_ACCESS_TRAP_EL2;
   2368                }
   2369            } else {
   2370                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
   2371                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
   2372                    return CP_ACCESS_TRAP_EL2;
   2373                }
   2374            }
   2375        }
   2376        break;
   2377    }
   2378    return CP_ACCESS_OK;
   2379}
   2380
   2381static CPAccessResult gt_pct_access(CPUARMState *env,
   2382                                    const ARMCPRegInfo *ri,
   2383                                    bool isread)
   2384{
   2385    return gt_counter_access(env, GTIMER_PHYS, isread);
   2386}
   2387
   2388static CPAccessResult gt_vct_access(CPUARMState *env,
   2389                                    const ARMCPRegInfo *ri,
   2390                                    bool isread)
   2391{
   2392    return gt_counter_access(env, GTIMER_VIRT, isread);
   2393}
   2394
   2395static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
   2396                                       bool isread)
   2397{
   2398    return gt_timer_access(env, GTIMER_PHYS, isread);
   2399}
   2400
   2401static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
   2402                                       bool isread)
   2403{
   2404    return gt_timer_access(env, GTIMER_VIRT, isread);
   2405}
   2406
   2407static CPAccessResult gt_stimer_access(CPUARMState *env,
   2408                                       const ARMCPRegInfo *ri,
   2409                                       bool isread)
   2410{
   2411    /* The AArch64 register view of the secure physical timer is
   2412     * always accessible from EL3, and configurably accessible from
   2413     * Secure EL1.
   2414     */
   2415    switch (arm_current_el(env)) {
   2416    case 1:
   2417        if (!arm_is_secure(env)) {
   2418            return CP_ACCESS_TRAP;
   2419        }
   2420        if (!(env->cp15.scr_el3 & SCR_ST)) {
   2421            return CP_ACCESS_TRAP_EL3;
   2422        }
   2423        return CP_ACCESS_OK;
   2424    case 0:
   2425    case 2:
   2426        return CP_ACCESS_TRAP;
   2427    case 3:
   2428        return CP_ACCESS_OK;
   2429    default:
   2430        g_assert_not_reached();
   2431    }
   2432}
   2433
   2434static uint64_t gt_get_countervalue(CPUARMState *env)
   2435{
   2436    ARMCPU *cpu = env_archcpu(env);
   2437
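            /* Derive the count from elapsed virtual time divided by the
             * period of one counter tick at the configured frequency.
             */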
   2438    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
   2439}
   2440
   2441static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
   2442{
   2443    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
   2444
   2445    if (gt->ctl & 1) {
   2446        /* Timer enabled: calculate and set current ISTATUS, irq, and
   2447         * reset timer to when ISTATUS next has to change
   2448         */
   2449        uint64_t offset = timeridx == GTIMER_VIRT ?
   2450                                      cpu->env.cp15.cntvoff_el2 : 0;
   2451        uint64_t count = gt_get_countervalue(&cpu->env);
   2452        /* Note that this must be unsigned 64 bit arithmetic: */
   2453        int istatus = count - offset >= gt->cval;
   2454        uint64_t nexttick;
   2455        int irqstate;
   2456
   2457        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
   2458
   2459        irqstate = (istatus && !(gt->ctl & 2));
   2460        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
   2461
   2462        if (istatus) {
   2463            /* Next transition is when count rolls back over to zero */
   2464            nexttick = UINT64_MAX;
   2465        } else {
   2466            /* Next transition is when we hit cval */
   2467            nexttick = gt->cval + offset;
   2468        }
   2469        /* Note that the desired next expiry time might be beyond the
   2470         * signed-64-bit range of a QEMUTimer -- in this case we just
   2471         * set the timer for as far in the future as possible. When the
   2472         * timer expires we will reset the timer for any remaining period.
   2473         */
   2474        if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
   2475            timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
   2476        } else {
   2477            timer_mod(cpu->gt_timer[timeridx], nexttick);
   2478        }
   2479        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
   2480    } else {
   2481        /* Timer disabled: ISTATUS and timer output always clear */
   2482        gt->ctl &= ~4;
   2483        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
   2484        timer_del(cpu->gt_timer[timeridx]);
   2485        trace_arm_gt_recalc_disabled(timeridx);
   2486    }
   2487}
   2488
   2489static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
   2490                           int timeridx)
   2491{
   2492    ARMCPU *cpu = env_archcpu(env);
   2493
   2494    timer_del(cpu->gt_timer[timeridx]);
   2495}
   2496
   2497static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2498{
   2499    return gt_get_countervalue(env);
   2500}
   2501
   2502static uint64_t gt_virt_cnt_offset(CPUARMState *env)
   2503{
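            /*
             * CNTVOFF_EL2 does not apply in the EL2&0 regimes: at EL2 when
             * HCR_EL2.E2H is set, or at EL0 when both E2H and TGE are set.
             */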
   2504    uint64_t hcr;
   2505
   2506    switch (arm_current_el(env)) {
   2507    case 2:
   2508        hcr = arm_hcr_el2_eff(env);
   2509        if (hcr & HCR_E2H) {
   2510            return 0;
   2511        }
   2512        break;
   2513    case 0:
   2514        hcr = arm_hcr_el2_eff(env);
   2515        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
   2516            return 0;
   2517        }
   2518        break;
   2519    }
   2520
   2521    return env->cp15.cntvoff_el2;
   2522}
   2523
   2524static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2525{
   2526    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
   2527}
   2528
   2529static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2530                          int timeridx,
   2531                          uint64_t value)
   2532{
   2533    trace_arm_gt_cval_write(timeridx, value);
   2534    env->cp15.c14_timer[timeridx].cval = value;
   2535    gt_recalc_timer(env_archcpu(env), timeridx);
   2536}
   2537
   2538static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
   2539                             int timeridx)
   2540{
   2541    uint64_t offset = 0;
   2542
   2543    switch (timeridx) {
   2544    case GTIMER_VIRT:
   2545    case GTIMER_HYPVIRT:
   2546        offset = gt_virt_cnt_offset(env);
   2547        break;
   2548    }
   2549
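            /*
             * TVAL reads as the low 32 bits of CVAL minus the
             * (offset-adjusted) current count: a signed downcounter that
             * goes negative once the timer condition has been met.
             */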
   2550    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
   2551                      (gt_get_countervalue(env) - offset));
   2552}
   2553
   2554static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2555                          int timeridx,
   2556                          uint64_t value)
   2557{
   2558    uint64_t offset = 0;
   2559
   2560    switch (timeridx) {
   2561    case GTIMER_VIRT:
   2562    case GTIMER_HYPVIRT:
   2563        offset = gt_virt_cnt_offset(env);
   2564        break;
   2565    }
   2566
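            /*
             * Writing TVAL sets CVAL = count - offset + sign-extended value,
             * so a negative TVAL places CVAL in the past and the timer
             * condition is met immediately.
             */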
   2567    trace_arm_gt_tval_write(timeridx, value);
   2568    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
   2569                                         sextract64(value, 0, 32);
   2570    gt_recalc_timer(env_archcpu(env), timeridx);
   2571}
   2572
   2573static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2574                         int timeridx,
   2575                         uint64_t value)
   2576{
   2577    ARMCPU *cpu = env_archcpu(env);
   2578    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
   2579
   2580    trace_arm_gt_ctl_write(timeridx, value);
   2581    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
   2582    if ((oldval ^ value) & 1) {
   2583        /* Enable toggled */
   2584        gt_recalc_timer(cpu, timeridx);
   2585    } else if ((oldval ^ value) & 2) {
   2586        /* IMASK toggled: don't need to recalculate,
   2587         * just set the interrupt line based on ISTATUS
   2588         */
   2589        int irqstate = (oldval & 4) && !(value & 2);
   2590
   2591        trace_arm_gt_imask_toggle(timeridx, irqstate);
   2592        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
   2593    }
   2594}
   2595
   2596static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
   2597{
   2598    gt_timer_reset(env, ri, GTIMER_PHYS);
   2599}
   2600
   2601static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2602                               uint64_t value)
   2603{
   2604    gt_cval_write(env, ri, GTIMER_PHYS, value);
   2605}
   2606
   2607static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2608{
   2609    return gt_tval_read(env, ri, GTIMER_PHYS);
   2610}
   2611
   2612static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2613                               uint64_t value)
   2614{
   2615    gt_tval_write(env, ri, GTIMER_PHYS, value);
   2616}
   2617
   2618static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2619                              uint64_t value)
   2620{
   2621    gt_ctl_write(env, ri, GTIMER_PHYS, value);
   2622}
   2623
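        /*
         * With VHE (HCR_EL2.E2H), EL0 timer register accesses made from the
         * EL2&0 translation regimes operate on the EL2 timers: redirect the
         * physical timer to GTIMER_HYP here, and the virtual timer to
         * GTIMER_HYPVIRT in gt_virt_redir_timeridx() below.
         */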
   2624static int gt_phys_redir_timeridx(CPUARMState *env)
   2625{
   2626    switch (arm_mmu_idx(env)) {
   2627    case ARMMMUIdx_E20_0:
   2628    case ARMMMUIdx_E20_2:
   2629    case ARMMMUIdx_E20_2_PAN:
   2630    case ARMMMUIdx_SE20_0:
   2631    case ARMMMUIdx_SE20_2:
   2632    case ARMMMUIdx_SE20_2_PAN:
   2633        return GTIMER_HYP;
   2634    default:
   2635        return GTIMER_PHYS;
   2636    }
   2637}
   2638
   2639static int gt_virt_redir_timeridx(CPUARMState *env)
   2640{
   2641    switch (arm_mmu_idx(env)) {
   2642    case ARMMMUIdx_E20_0:
   2643    case ARMMMUIdx_E20_2:
   2644    case ARMMMUIdx_E20_2_PAN:
   2645    case ARMMMUIdx_SE20_0:
   2646    case ARMMMUIdx_SE20_2:
   2647    case ARMMMUIdx_SE20_2_PAN:
   2648        return GTIMER_HYPVIRT;
   2649    default:
   2650        return GTIMER_VIRT;
   2651    }
   2652}
   2653
   2654static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
   2655                                        const ARMCPRegInfo *ri)
   2656{
   2657    int timeridx = gt_phys_redir_timeridx(env);
   2658    return env->cp15.c14_timer[timeridx].cval;
   2659}
   2660
   2661static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2662                                     uint64_t value)
   2663{
   2664    int timeridx = gt_phys_redir_timeridx(env);
   2665    gt_cval_write(env, ri, timeridx, value);
   2666}
   2667
   2668static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
   2669                                        const ARMCPRegInfo *ri)
   2670{
   2671    int timeridx = gt_phys_redir_timeridx(env);
   2672    return gt_tval_read(env, ri, timeridx);
   2673}
   2674
   2675static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2676                                     uint64_t value)
   2677{
   2678    int timeridx = gt_phys_redir_timeridx(env);
   2679    gt_tval_write(env, ri, timeridx, value);
   2680}
   2681
   2682static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
   2683                                       const ARMCPRegInfo *ri)
   2684{
   2685    int timeridx = gt_phys_redir_timeridx(env);
   2686    return env->cp15.c14_timer[timeridx].ctl;
   2687}
   2688
   2689static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2690                                    uint64_t value)
   2691{
   2692    int timeridx = gt_phys_redir_timeridx(env);
   2693    gt_ctl_write(env, ri, timeridx, value);
   2694}
   2695
   2696static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
   2697{
   2698    gt_timer_reset(env, ri, GTIMER_VIRT);
   2699}
   2700
   2701static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2702                               uint64_t value)
   2703{
   2704    gt_cval_write(env, ri, GTIMER_VIRT, value);
   2705}
   2706
   2707static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2708{
   2709    return gt_tval_read(env, ri, GTIMER_VIRT);
   2710}
   2711
   2712static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2713                               uint64_t value)
   2714{
   2715    gt_tval_write(env, ri, GTIMER_VIRT, value);
   2716}
   2717
   2718static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2719                              uint64_t value)
   2720{
   2721    gt_ctl_write(env, ri, GTIMER_VIRT, value);
   2722}
   2723
   2724static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2725                              uint64_t value)
   2726{
   2727    ARMCPU *cpu = env_archcpu(env);
   2728
   2729    trace_arm_gt_cntvoff_write(value);
   2730    raw_write(env, ri, value);
   2731    gt_recalc_timer(cpu, GTIMER_VIRT);
   2732}
   2733
   2734static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
   2735                                        const ARMCPRegInfo *ri)
   2736{
   2737    int timeridx = gt_virt_redir_timeridx(env);
   2738    return env->cp15.c14_timer[timeridx].cval;
   2739}
   2740
   2741static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2742                                     uint64_t value)
   2743{
   2744    int timeridx = gt_virt_redir_timeridx(env);
   2745    gt_cval_write(env, ri, timeridx, value);
   2746}
   2747
   2748static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
   2749                                        const ARMCPRegInfo *ri)
   2750{
   2751    int timeridx = gt_virt_redir_timeridx(env);
   2752    return gt_tval_read(env, ri, timeridx);
   2753}
   2754
   2755static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2756                                     uint64_t value)
   2757{
   2758    int timeridx = gt_virt_redir_timeridx(env);
   2759    gt_tval_write(env, ri, timeridx, value);
   2760}
   2761
   2762static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
   2763                                       const ARMCPRegInfo *ri)
   2764{
   2765    int timeridx = gt_virt_redir_timeridx(env);
   2766    return env->cp15.c14_timer[timeridx].ctl;
   2767}
   2768
   2769static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2770                                    uint64_t value)
   2771{
   2772    int timeridx = gt_virt_redir_timeridx(env);
   2773    gt_ctl_write(env, ri, timeridx, value);
   2774}
   2775
   2776static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
   2777{
   2778    gt_timer_reset(env, ri, GTIMER_HYP);
   2779}
   2780
   2781static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2782                              uint64_t value)
   2783{
   2784    gt_cval_write(env, ri, GTIMER_HYP, value);
   2785}
   2786
   2787static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2788{
   2789    return gt_tval_read(env, ri, GTIMER_HYP);
   2790}
   2791
   2792static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2793                              uint64_t value)
   2794{
   2795    gt_tval_write(env, ri, GTIMER_HYP, value);
   2796}
   2797
   2798static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2799                              uint64_t value)
   2800{
   2801    gt_ctl_write(env, ri, GTIMER_HYP, value);
   2802}
   2803
   2804static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
   2805{
   2806    gt_timer_reset(env, ri, GTIMER_SEC);
   2807}
   2808
   2809static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2810                              uint64_t value)
   2811{
   2812    gt_cval_write(env, ri, GTIMER_SEC, value);
   2813}
   2814
   2815static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2816{
   2817    return gt_tval_read(env, ri, GTIMER_SEC);
   2818}
   2819
   2820static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2821                              uint64_t value)
   2822{
   2823    gt_tval_write(env, ri, GTIMER_SEC, value);
   2824}
   2825
   2826static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2827                              uint64_t value)
   2828{
   2829    gt_ctl_write(env, ri, GTIMER_SEC, value);
   2830}
   2831
   2832static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
   2833{
   2834    gt_timer_reset(env, ri, GTIMER_HYPVIRT);
   2835}
   2836
   2837static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2838                             uint64_t value)
   2839{
   2840    gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
   2841}
   2842
   2843static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2844{
   2845    return gt_tval_read(env, ri, GTIMER_HYPVIRT);
   2846}
   2847
   2848static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2849                             uint64_t value)
   2850{
   2851    gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
   2852}
   2853
   2854static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2855                            uint64_t value)
   2856{
   2857    gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
   2858}
   2859
   2860void arm_gt_ptimer_cb(void *opaque)
   2861{
   2862    ARMCPU *cpu = opaque;
   2863
   2864    gt_recalc_timer(cpu, GTIMER_PHYS);
   2865}
   2866
   2867void arm_gt_vtimer_cb(void *opaque)
   2868{
   2869    ARMCPU *cpu = opaque;
   2870
   2871    gt_recalc_timer(cpu, GTIMER_VIRT);
   2872}
   2873
   2874void arm_gt_htimer_cb(void *opaque)
   2875{
   2876    ARMCPU *cpu = opaque;
   2877
   2878    gt_recalc_timer(cpu, GTIMER_HYP);
   2879}
   2880
   2881void arm_gt_stimer_cb(void *opaque)
   2882{
   2883    ARMCPU *cpu = opaque;
   2884
   2885    gt_recalc_timer(cpu, GTIMER_SEC);
   2886}
   2887
   2888void arm_gt_hvtimer_cb(void *opaque)
   2889{
   2890    ARMCPU *cpu = opaque;
   2891
   2892    gt_recalc_timer(cpu, GTIMER_HYPVIRT);
   2893}
   2894
   2895static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
   2896{
   2897    ARMCPU *cpu = env_archcpu(env);
   2898
   2899    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
   2900}
   2901
   2902static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
   2903    /* Note that CNTFRQ is purely reads-as-written for the benefit
   2904     * of software; writing it doesn't actually change the timer frequency.
    2905     * Our reset value matches the fixed frequency at which we implement the timer.
   2906     */
   2907    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
   2908      .type = ARM_CP_ALIAS,
   2909      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
   2910      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
   2911    },
   2912    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
   2913      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
   2914      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
   2915      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
   2916      .resetfn = arm_gt_cntfrq_reset,
   2917    },
   2918    /* overall control: mostly access permissions */
   2919    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
   2920      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
   2921      .access = PL1_RW,
   2922      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
   2923      .resetvalue = 0,
   2924    },
   2925    /* per-timer control */
   2926    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
   2927      .secure = ARM_CP_SECSTATE_NS,
   2928      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
   2929      .accessfn = gt_ptimer_access,
   2930      .fieldoffset = offsetoflow32(CPUARMState,
   2931                                   cp15.c14_timer[GTIMER_PHYS].ctl),
   2932      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
   2933      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
   2934    },
   2935    { .name = "CNTP_CTL_S",
   2936      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
   2937      .secure = ARM_CP_SECSTATE_S,
   2938      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
   2939      .accessfn = gt_ptimer_access,
   2940      .fieldoffset = offsetoflow32(CPUARMState,
   2941                                   cp15.c14_timer[GTIMER_SEC].ctl),
   2942      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
   2943    },
   2944    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
   2945      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
   2946      .type = ARM_CP_IO, .access = PL0_RW,
   2947      .accessfn = gt_ptimer_access,
   2948      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
   2949      .resetvalue = 0,
   2950      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
   2951      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
   2952    },
   2953    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
   2954      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
   2955      .accessfn = gt_vtimer_access,
   2956      .fieldoffset = offsetoflow32(CPUARMState,
   2957                                   cp15.c14_timer[GTIMER_VIRT].ctl),
   2958      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
   2959      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
   2960    },
   2961    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
   2962      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
   2963      .type = ARM_CP_IO, .access = PL0_RW,
   2964      .accessfn = gt_vtimer_access,
   2965      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
   2966      .resetvalue = 0,
   2967      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
   2968      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
   2969    },
    2970    /* TimerValue views: a 32-bit downcounting view of the underlying state */
   2971    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
   2972      .secure = ARM_CP_SECSTATE_NS,
   2973      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
   2974      .accessfn = gt_ptimer_access,
   2975      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
   2976    },
   2977    { .name = "CNTP_TVAL_S",
   2978      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
   2979      .secure = ARM_CP_SECSTATE_S,
   2980      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
   2981      .accessfn = gt_ptimer_access,
   2982      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
   2983    },
   2984    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
   2985      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
   2986      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
   2987      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
   2988      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
   2989    },
   2990    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
   2991      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
   2992      .accessfn = gt_vtimer_access,
   2993      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
   2994    },
   2995    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
   2996      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
   2997      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
   2998      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
   2999      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
   3000    },
   3001    /* The counter itself */
   3002    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
   3003      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
   3004      .accessfn = gt_pct_access,
   3005      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
   3006    },
   3007    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
   3008      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
   3009      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
   3010      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
   3011    },
   3012    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
   3013      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
   3014      .accessfn = gt_vct_access,
   3015      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
   3016    },
   3017    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
   3018      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
   3019      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
   3020      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
   3021    },
   3022    /* Comparison value, indicating when the timer goes off */
   3023    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
   3024      .secure = ARM_CP_SECSTATE_NS,
   3025      .access = PL0_RW,
   3026      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
   3027      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
   3028      .accessfn = gt_ptimer_access,
   3029      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
   3030      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
   3031    },
   3032    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
   3033      .secure = ARM_CP_SECSTATE_S,
   3034      .access = PL0_RW,
   3035      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
   3036      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
   3037      .accessfn = gt_ptimer_access,
   3038      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
   3039    },
   3040    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
   3041      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
   3042      .access = PL0_RW,
   3043      .type = ARM_CP_IO,
   3044      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
   3045      .resetvalue = 0, .accessfn = gt_ptimer_access,
   3046      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
   3047      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
   3048    },
   3049    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
   3050      .access = PL0_RW,
   3051      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
   3052      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
   3053      .accessfn = gt_vtimer_access,
   3054      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
   3055      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
   3056    },
   3057    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
   3058      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
   3059      .access = PL0_RW,
   3060      .type = ARM_CP_IO,
   3061      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
   3062      .resetvalue = 0, .accessfn = gt_vtimer_access,
   3063      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
   3064      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
   3065    },
    3066    /* Secure timer -- this is actually restricted to EL3 only,
    3067     * and configurably to Secure EL1, via the accessfn.
   3068     */
   3069    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
   3070      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
   3071      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
   3072      .accessfn = gt_stimer_access,
   3073      .readfn = gt_sec_tval_read,
   3074      .writefn = gt_sec_tval_write,
   3075      .resetfn = gt_sec_timer_reset,
   3076    },
   3077    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
   3078      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
   3079      .type = ARM_CP_IO, .access = PL1_RW,
   3080      .accessfn = gt_stimer_access,
   3081      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
   3082      .resetvalue = 0,
   3083      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
   3084    },
   3085    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
   3086      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
   3087      .type = ARM_CP_IO, .access = PL1_RW,
   3088      .accessfn = gt_stimer_access,
   3089      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
   3090      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
   3091    },
   3092    REGINFO_SENTINEL
   3093};
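
/*
 * A minimal sketch of how the banked and redirected entries above select
 * their backing state: the NS-banked CNTP_CTL goes through
 * gt_phys_redir_timeridx() (assumed to return GTIMER_HYPPHYS rather than
 * GTIMER_PHYS when HCR_EL2.E2H redirection applies), while CNTP_CTL_S
 * always uses GTIMER_SEC.  Compiled out; for illustration only.
 */
#if 0
static uint64_t example_cntp_ctl_lookup(CPUARMState *env, bool secure)
{
    int timeridx = secure ? GTIMER_SEC : gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}
#endif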
   3094
   3095static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
   3096                                 bool isread)
   3097{
   3098    if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
   3099        return CP_ACCESS_TRAP;
   3100    }
   3101    return CP_ACCESS_OK;
   3102}
   3103
   3104#else
   3105
    3106/* In user-mode most of the generic timer registers are inaccessible;
    3107 * however, modern kernels (4.12+) allow access to cntvct_el0
   3108 */
   3109
   3110static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
   3111{
   3112    ARMCPU *cpu = env_archcpu(env);
   3113
    3114    /* Currently we have no support for QEMUTimer in linux-user, so we
    3115     * can't call gt_get_countervalue(env); instead we directly
    3116     * call the lower-level functions.
   3117     */
   3118    return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
   3119}
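
/*
 * A quick worked example of the fixed rate this implies, assuming
 * GTIMER_SCALE == 16 as used for the CNTFRQ reset value below: the
 * counter runs at NANOSECONDS_PER_SECOND / 16 == 62500000 Hz (62.5 MHz),
 * i.e. one tick per 16 ns of cpu_get_clock().  Compiled out.
 */
#if 0
static uint64_t example_user_mode_cntvct(void)
{
    uint64_t hz = NANOSECONDS_PER_SECOND / 16;         /* 62500000 */
    uint64_t period_ns = NANOSECONDS_PER_SECOND / hz;  /* 16 ns per tick */
    return cpu_get_clock() / period_ns;
}
#endif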
   3120
   3121static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
   3122    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
   3123      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
   3124      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
   3125      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
   3126      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
   3127    },
   3128    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
   3129      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
   3130      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
   3131      .readfn = gt_virt_cnt_read,
   3132    },
   3133    REGINFO_SENTINEL
   3134};
   3135
   3136#endif
   3137
   3138static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
   3139{
   3140    if (arm_feature(env, ARM_FEATURE_LPAE)) {
   3141        raw_write(env, ri, value);
   3142    } else if (arm_feature(env, ARM_FEATURE_V7)) {
   3143        raw_write(env, ri, value & 0xfffff6ff);
   3144    } else {
   3145        raw_write(env, ri, value & 0xfffff1ff);
   3146    }
   3147}
   3148
   3149#ifndef CONFIG_USER_ONLY
   3150/* get_phys_addr() isn't present for user-mode-only targets */
   3151
   3152static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
   3153                                 bool isread)
   3154{
   3155    if (ri->opc2 & 4) {
   3156        /* The ATS12NSO* operations must trap to EL3 or EL2 if executed in
   3157         * Secure EL1 (which can only happen if EL3 is AArch64).
   3158         * They are simply UNDEF if executed from NS EL1.
   3159         * They function normally from EL2 or EL3.
   3160         */
   3161        if (arm_current_el(env) == 1) {
   3162            if (arm_is_secure_below_el3(env)) {
   3163                if (env->cp15.scr_el3 & SCR_EEL2) {
   3164                    return CP_ACCESS_TRAP_UNCATEGORIZED_EL2;
   3165                }
   3166                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
   3167            }
   3168            return CP_ACCESS_TRAP_UNCATEGORIZED;
   3169        }
   3170    }
   3171    return CP_ACCESS_OK;
   3172}
   3173
   3174#ifdef CONFIG_TCG
   3175static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
   3176                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
   3177{
   3178    hwaddr phys_addr;
   3179    target_ulong page_size;
   3180    int prot;
   3181    bool ret;
   3182    uint64_t par64;
   3183    bool format64 = false;
   3184    MemTxAttrs attrs = {};
   3185    ARMMMUFaultInfo fi = {};
   3186    ARMCacheAttrs cacheattrs = {};
   3187
   3188    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
   3189                        &prot, &page_size, &fi, &cacheattrs);
   3190
   3191    if (ret) {
   3192        /*
   3193         * Some kinds of translation fault must cause exceptions rather
   3194         * than being reported in the PAR.
   3195         */
   3196        int current_el = arm_current_el(env);
   3197        int target_el;
   3198        uint32_t syn, fsr, fsc;
   3199        bool take_exc = false;
   3200
   3201        if (fi.s1ptw && current_el == 1
   3202            && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
   3203            /*
   3204             * Synchronous stage 2 fault on an access made as part of the
   3205             * translation table walk for AT S1E0* or AT S1E1* insn
   3206             * executed from NS EL1. If this is a synchronous external abort
   3207             * and SCR_EL3.EA == 1, then we take a synchronous external abort
   3208             * to EL3. Otherwise the fault is taken as an exception to EL2,
   3209             * and HPFAR_EL2 holds the faulting IPA.
   3210             */
   3211            if (fi.type == ARMFault_SyncExternalOnWalk &&
   3212                (env->cp15.scr_el3 & SCR_EA)) {
   3213                target_el = 3;
   3214            } else {
   3215                env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
   3216                if (arm_is_secure_below_el3(env) && fi.s1ns) {
   3217                    env->cp15.hpfar_el2 |= HPFAR_NS;
   3218                }
   3219                target_el = 2;
   3220            }
   3221            take_exc = true;
   3222        } else if (fi.type == ARMFault_SyncExternalOnWalk) {
   3223            /*
   3224             * Synchronous external aborts during a translation table walk
   3225             * are taken as Data Abort exceptions.
   3226             */
   3227            if (fi.stage2) {
   3228                if (current_el == 3) {
   3229                    target_el = 3;
   3230                } else {
   3231                    target_el = 2;
   3232                }
   3233            } else {
   3234                target_el = exception_target_el(env);
   3235            }
   3236            take_exc = true;
   3237        }
   3238
   3239        if (take_exc) {
   3240            /* Construct FSR and FSC using same logic as arm_deliver_fault() */
   3241            if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
   3242                arm_s1_regime_using_lpae_format(env, mmu_idx)) {
   3243                fsr = arm_fi_to_lfsc(&fi);
   3244                fsc = extract32(fsr, 0, 6);
   3245            } else {
   3246                fsr = arm_fi_to_sfsc(&fi);
   3247                fsc = 0x3f;
   3248            }
   3249            /*
   3250             * Report exception with ESR indicating a fault due to a
   3251             * translation table walk for a cache maintenance instruction.
   3252             */
   3253            syn = syn_data_abort_no_iss(current_el == target_el, 0,
   3254                                        fi.ea, 1, fi.s1ptw, 1, fsc);
   3255            env->exception.vaddress = value;
   3256            env->exception.fsr = fsr;
   3257            raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
   3258        }
   3259    }
   3260
   3261    if (is_a64(env)) {
   3262        format64 = true;
   3263    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
   3264        /*
   3265         * ATS1Cxx:
   3266         * * TTBCR.EAE determines whether the result is returned using the
   3267         *   32-bit or the 64-bit PAR format
    3268         * * Instructions executed in Hyp mode always use the 64-bit format
    3269         *
    3270         * ATS1S2NSOxx uses the 64-bit format if any of the following is true:
    3271         * * The Non-secure TTBCR.EAE bit is set to 1
    3272         * * The implementation includes EL2, and the value of HCR.VM is 1
    3273         *
    3274         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
    3275         *
    3276         * ATS1Hx always uses the 64-bit format.
   3277         */
   3278        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
   3279
   3280        if (arm_feature(env, ARM_FEATURE_EL2)) {
   3281            if (mmu_idx == ARMMMUIdx_E10_0 ||
   3282                mmu_idx == ARMMMUIdx_E10_1 ||
   3283                mmu_idx == ARMMMUIdx_E10_1_PAN) {
   3284                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
   3285            } else {
   3286                format64 |= arm_current_el(env) == 2;
   3287            }
   3288        }
   3289    }
   3290
   3291    if (format64) {
   3292        /* Create a 64-bit PAR */
   3293        par64 = (1 << 11); /* LPAE bit always set */
   3294        if (!ret) {
   3295            par64 |= phys_addr & ~0xfffULL;
   3296            if (!attrs.secure) {
   3297                par64 |= (1 << 9); /* NS */
   3298            }
   3299            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
   3300            par64 |= cacheattrs.shareability << 7; /* SH */
   3301        } else {
   3302            uint32_t fsr = arm_fi_to_lfsc(&fi);
   3303
   3304            par64 |= 1; /* F */
   3305            par64 |= (fsr & 0x3f) << 1; /* FS */
   3306            if (fi.stage2) {
   3307                par64 |= (1 << 9); /* S */
   3308            }
   3309            if (fi.s1ptw) {
   3310                par64 |= (1 << 8); /* PTW */
   3311            }
   3312        }
   3313    } else {
   3314        /* fsr is a DFSR/IFSR value for the short descriptor
   3315         * translation table format (with WnR always clear).
   3316         * Convert it to a 32-bit PAR.
   3317         */
   3318        if (!ret) {
   3319            /* We do not set any attribute bits in the PAR */
   3320            if (page_size == (1 << 24)
   3321                && arm_feature(env, ARM_FEATURE_V7)) {
   3322                par64 = (phys_addr & 0xff000000) | (1 << 1);
   3323            } else {
   3324                par64 = phys_addr & 0xfffff000;
   3325            }
   3326            if (!attrs.secure) {
   3327                par64 |= (1 << 9); /* NS */
   3328            }
   3329        } else {
   3330            uint32_t fsr = arm_fi_to_sfsc(&fi);
   3331
   3332            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
   3333                    ((fsr & 0xf) << 1) | 1;
   3334        }
   3335    }
   3336    return par64;
   3337}
   3338#endif /* CONFIG_TCG */
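
/*
 * A worked example of the 64-bit PAR assembled above for a successful
 * translation: a 4KB page at PA 0x80004000, Non-secure, with Normal
 * write-back attributes (0xff) and Inner Shareable (SH == 3) yields
 * 0xff00000080004b80.  Compiled out; the values are illustrative only.
 */
#if 0
static uint64_t example_par64_ok(void)
{
    uint64_t par64 = 1 << 11;                /* LPAE format, F == 0 */
    par64 |= 0x80004000ULL & ~0xfffULL;      /* PA[47:12] */
    par64 |= 1 << 9;                         /* NS */
    par64 |= 0xffULL << 56;                  /* ATTR: Normal WB */
    par64 |= 3 << 7;                         /* SH: Inner Shareable */
    return par64;                            /* 0xff00000080004b80 */
}
#endif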
   3339
   3340static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
   3341{
   3342#ifdef CONFIG_TCG
   3343    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
   3344    uint64_t par64;
   3345    ARMMMUIdx mmu_idx;
   3346    int el = arm_current_el(env);
   3347    bool secure = arm_is_secure_below_el3(env);
   3348
   3349    switch (ri->opc2 & 6) {
   3350    case 0:
   3351        /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
   3352        switch (el) {
   3353        case 3:
   3354            mmu_idx = ARMMMUIdx_SE3;
   3355            break;
   3356        case 2:
   3357            g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
   3358            /* fall through */
   3359        case 1:
   3360            if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
   3361                mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
   3362                           : ARMMMUIdx_Stage1_E1_PAN);
   3363            } else {
   3364                mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
   3365            }
   3366            break;
   3367        default:
   3368            g_assert_not_reached();
   3369        }
   3370        break;
   3371    case 2:
   3372        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
   3373        switch (el) {
   3374        case 3:
   3375            mmu_idx = ARMMMUIdx_SE10_0;
   3376            break;
   3377        case 2:
   3378            g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
   3379            mmu_idx = ARMMMUIdx_Stage1_E0;
   3380            break;
   3381        case 1:
   3382            mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
   3383            break;
   3384        default:
   3385            g_assert_not_reached();
   3386        }
   3387        break;
   3388    case 4:
   3389        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
   3390        mmu_idx = ARMMMUIdx_E10_1;
   3391        break;
   3392    case 6:
   3393        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
   3394        mmu_idx = ARMMMUIdx_E10_0;
   3395        break;
   3396    default:
   3397        g_assert_not_reached();
   3398    }
   3399
   3400    par64 = do_ats_write(env, value, access_type, mmu_idx);
   3401
   3402    A32_BANKED_CURRENT_REG_SET(env, par, par64);
   3403#else
   3404    /* Handled by hardware accelerator. */
   3405    g_assert_not_reached();
   3406#endif /* CONFIG_TCG */
   3407}
   3408
   3409static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3410                        uint64_t value)
   3411{
   3412#ifdef CONFIG_TCG
   3413    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
   3414    uint64_t par64;
   3415
   3416    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);
   3417
   3418    A32_BANKED_CURRENT_REG_SET(env, par, par64);
   3419#else
   3420    /* Handled by hardware accelerator. */
   3421    g_assert_not_reached();
   3422#endif /* CONFIG_TCG */
   3423}
   3424
   3425static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
   3426                                     bool isread)
   3427{
   3428    if (arm_current_el(env) == 3 &&
   3429        !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
   3430        return CP_ACCESS_TRAP;
   3431    }
   3432    return CP_ACCESS_OK;
   3433}
   3434
   3435static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
   3436                        uint64_t value)
   3437{
   3438#ifdef CONFIG_TCG
   3439    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
   3440    ARMMMUIdx mmu_idx;
   3441    int secure = arm_is_secure_below_el3(env);
   3442
   3443    switch (ri->opc2 & 6) {
   3444    case 0:
   3445        switch (ri->opc1) {
   3446        case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
   3447            if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
   3448                mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
   3449                           : ARMMMUIdx_Stage1_E1_PAN);
   3450            } else {
   3451                mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
   3452            }
   3453            break;
   3454        case 4: /* AT S1E2R, AT S1E2W */
   3455            mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2;
   3456            break;
   3457        case 6: /* AT S1E3R, AT S1E3W */
   3458            mmu_idx = ARMMMUIdx_SE3;
   3459            break;
   3460        default:
   3461            g_assert_not_reached();
   3462        }
   3463        break;
   3464    case 2: /* AT S1E0R, AT S1E0W */
   3465        mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
   3466        break;
   3467    case 4: /* AT S12E1R, AT S12E1W */
   3468        mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
   3469        break;
   3470    case 6: /* AT S12E0R, AT S12E0W */
   3471        mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
   3472        break;
   3473    default:
   3474        g_assert_not_reached();
   3475    }
   3476
   3477    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
   3478#else
   3479    /* Handled by hardware accelerator. */
   3480    g_assert_not_reached();
   3481#endif /* CONFIG_TCG */
   3482}
   3483#endif
   3484
   3485static const ARMCPRegInfo vapa_cp_reginfo[] = {
   3486    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
   3487      .access = PL1_RW, .resetvalue = 0,
   3488      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
   3489                             offsetoflow32(CPUARMState, cp15.par_ns) },
   3490      .writefn = par_write },
   3491#ifndef CONFIG_USER_ONLY
   3492    /* This underdecoding is safe because the reginfo is NO_RAW. */
   3493    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
   3494      .access = PL1_W, .accessfn = ats_access,
   3495      .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
   3496#endif
   3497    REGINFO_SENTINEL
   3498};
   3499
   3500/* Return basic MPU access permission bits.  */
   3501static uint32_t simple_mpu_ap_bits(uint32_t val)
   3502{
   3503    uint32_t ret;
   3504    uint32_t mask;
   3505    int i;
   3506    ret = 0;
   3507    mask = 3;
   3508    for (i = 0; i < 16; i += 2) {
   3509        ret |= (val >> i) & mask;
   3510        mask <<= 2;
   3511    }
   3512    return ret;
   3513}
   3514
   3515/* Pad basic MPU access permission bits to extended format.  */
   3516static uint32_t extended_mpu_ap_bits(uint32_t val)
   3517{
   3518    uint32_t ret;
   3519    uint32_t mask;
   3520    int i;
   3521    ret = 0;
   3522    mask = 3;
   3523    for (i = 0; i < 16; i += 2) {
   3524        ret |= (val & mask) << i;
   3525        mask <<= 2;
   3526    }
   3527    return ret;
   3528}
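
/*
 * A worked example of the round trip between the two helpers above: the
 * simple format keeps only the low two bits of each 4-bit extended AP
 * field, so with regions 0 and 1 both set to AP == 3, the extended value
 * 0x33 reads back as 0xf and 0xf expands again to 0x33.  Compiled out.
 */
#if 0
static void example_mpu_ap_roundtrip(void)
{
    assert(simple_mpu_ap_bits(0x33) == 0xf);
    assert(extended_mpu_ap_bits(0xf) == 0x33);
}
#endif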
   3529
   3530static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3531                                 uint64_t value)
   3532{
   3533    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
   3534}
   3535
   3536static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
   3537{
   3538    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
   3539}
   3540
   3541static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3542                                 uint64_t value)
   3543{
   3544    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
   3545}
   3546
   3547static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
   3548{
   3549    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
   3550}
   3551
   3552static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
   3553{
   3554    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
   3555
   3556    if (!u32p) {
   3557        return 0;
   3558    }
   3559
   3560    u32p += env->pmsav7.rnr[M_REG_NS];
   3561    return *u32p;
   3562}
   3563
   3564static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3565                         uint64_t value)
   3566{
   3567    ARMCPU *cpu = env_archcpu(env);
   3568    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
   3569
   3570    if (!u32p) {
   3571        return;
   3572    }
   3573
   3574    u32p += env->pmsav7.rnr[M_REG_NS];
   3575    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
   3576    *u32p = value;
   3577}
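
/*
 * A minimal sketch of the double indirection used above: ri->fieldoffset
 * names a uint32_t * inside CPUARMState (drbar/drsr/dracr), assumed to
 * point at a per-region array sized by cpu->pmsav7_dregion, which the
 * helpers index with the current RGNR.  Compiled out.
 */
#if 0
static uint32_t example_drbar_for_region(CPUARMState *env, unsigned region)
{
    uint32_t *drbar = env->pmsav7.drbar;  /* NULL when no MPU regions */
    return drbar ? drbar[region] : 0;
}
#endif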
   3578
   3579static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3580                              uint64_t value)
   3581{
   3582    ARMCPU *cpu = env_archcpu(env);
   3583    uint32_t nrgs = cpu->pmsav7_dregion;
   3584
   3585    if (value >= nrgs) {
   3586        qemu_log_mask(LOG_GUEST_ERROR,
   3587                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
    3588                      " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
   3589        return;
   3590    }
   3591
   3592    raw_write(env, ri, value);
   3593}
   3594
   3595static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
   3596    /* Reset for all these registers is handled in arm_cpu_reset(),
   3597     * because the PMSAv7 is also used by M-profile CPUs, which do
   3598     * not register cpregs but still need the state to be reset.
   3599     */
   3600    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
   3601      .access = PL1_RW, .type = ARM_CP_NO_RAW,
   3602      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
   3603      .readfn = pmsav7_read, .writefn = pmsav7_write,
   3604      .resetfn = arm_cp_reset_ignore },
   3605    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
   3606      .access = PL1_RW, .type = ARM_CP_NO_RAW,
   3607      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
   3608      .readfn = pmsav7_read, .writefn = pmsav7_write,
   3609      .resetfn = arm_cp_reset_ignore },
   3610    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
   3611      .access = PL1_RW, .type = ARM_CP_NO_RAW,
   3612      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
   3613      .readfn = pmsav7_read, .writefn = pmsav7_write,
   3614      .resetfn = arm_cp_reset_ignore },
   3615    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
   3616      .access = PL1_RW,
   3617      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
   3618      .writefn = pmsav7_rgnr_write,
   3619      .resetfn = arm_cp_reset_ignore },
   3620    REGINFO_SENTINEL
   3621};
   3622
   3623static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
   3624    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
   3625      .access = PL1_RW, .type = ARM_CP_ALIAS,
   3626      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
   3627      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
   3628    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
   3629      .access = PL1_RW, .type = ARM_CP_ALIAS,
   3630      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
   3631      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
   3632    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
   3633      .access = PL1_RW,
   3634      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
   3635      .resetvalue = 0, },
   3636    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
   3637      .access = PL1_RW,
   3638      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
   3639      .resetvalue = 0, },
   3640    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
   3641      .access = PL1_RW,
   3642      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
   3643    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
   3644      .access = PL1_RW,
   3645      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
   3646    /* Protection region base and size registers */
   3647    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
   3648      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
   3649      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
   3650    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
   3651      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
   3652      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
   3653    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
   3654      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
   3655      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
   3656    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
   3657      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
   3658      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
   3659    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
   3660      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
   3661      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
   3662    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
   3663      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
   3664      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
   3665    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
   3666      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
   3667      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
   3668    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
   3669      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
   3670      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
   3671    REGINFO_SENTINEL
   3672};
   3673
   3674static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3675                                 uint64_t value)
   3676{
   3677    TCR *tcr = raw_ptr(env, ri);
   3678    int maskshift = extract32(value, 0, 3);
   3679
   3680    if (!arm_feature(env, ARM_FEATURE_V8)) {
   3681        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
    3682            /* Pre-ARMv8, bits [21:19], [15:14] and [6:3] are UNK/SBZP when
    3683             * using the Long-descriptor translation table format */
   3684            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
   3685        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
   3686            /* In an implementation that includes the Security Extensions
   3687             * TTBCR has additional fields PD0 [4] and PD1 [5] for
   3688             * Short-descriptor translation table format.
   3689             */
   3690            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
   3691        } else {
   3692            value &= TTBCR_N;
   3693        }
   3694    }
   3695
    3696    /* Update the masks corresponding to the TCR bank being written.
    3697     * Note that we always calculate mask and base_mask, but
    3698     * they are only used for short-descriptor tables (i.e. if EAE is 0);
   3699     * for long-descriptor tables the TCR fields are used differently
   3700     * and the mask and base_mask values are meaningless.
   3701     */
   3702    tcr->raw_tcr = value;
   3703    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
   3704    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
   3705}
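
/*
 * A worked example of the short-descriptor masks computed above, for
 * TTBCR.N == 2 (maskshift == 2):
 *
 *   tcr->mask      = ~(0xffffffffu >> 2) = 0xc0000000
 *   tcr->base_mask = ~(0x3fffu >> 2)     = 0xfffff000
 *
 * so VAs with either of the top two bits set translate via TTBR1, and
 * the TTBR0 table base alignment drops from 16KB to 4KB.  Compiled out.
 */
#if 0
static void example_ttbcr_n2(TCR *tcr)
{
    tcr->mask = ~(((uint32_t)0xffffffffu) >> 2);   /* 0xc0000000 */
    tcr->base_mask = ~((uint32_t)0x3fffu >> 2);    /* 0xfffff000 */
}
#endif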
   3706
   3707static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3708                             uint64_t value)
   3709{
   3710    ARMCPU *cpu = env_archcpu(env);
   3711    TCR *tcr = raw_ptr(env, ri);
   3712
   3713    if (arm_feature(env, ARM_FEATURE_LPAE)) {
    3714        /* With LPAE a TTBCR write could result in a change of ASID
   3715         * via the TTBCR.A1 bit, so do a TLB flush.
   3716         */
   3717        tlb_flush(CPU(cpu));
   3718    }
   3719    /* Preserve the high half of TCR_EL1, set via TTBCR2.  */
   3720    value = deposit64(tcr->raw_tcr, 0, 32, value);
   3721    vmsa_ttbcr_raw_write(env, ri, value);
   3722}
   3723
   3724static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
   3725{
   3726    TCR *tcr = raw_ptr(env, ri);
   3727
    3728    /* Reset both the TCR and the masks corresponding to the bank of
   3729     * the TCR being reset.
   3730     */
   3731    tcr->raw_tcr = 0;
   3732    tcr->mask = 0;
   3733    tcr->base_mask = 0xffffc000u;
   3734}
   3735
   3736static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3737                               uint64_t value)
   3738{
   3739    ARMCPU *cpu = env_archcpu(env);
   3740    TCR *tcr = raw_ptr(env, ri);
   3741
   3742    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
   3743    tlb_flush(CPU(cpu));
   3744    tcr->raw_tcr = value;
   3745}
   3746
   3747static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3748                            uint64_t value)
   3749{
   3750    /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
   3751    if (cpreg_field_is_64bit(ri) &&
   3752        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
   3753        ARMCPU *cpu = env_archcpu(env);
   3754        tlb_flush(CPU(cpu));
   3755    }
   3756    raw_write(env, ri, value);
   3757}
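
/*
 * A quick illustration of the check above: for 64-bit TTBR writes the
 * ASID lives in bits [63:48], so only a change in that field forces the
 * TLB flush.  Compiled out.
 */
#if 0
static bool example_asid_changed(uint64_t old_ttbr, uint64_t new_ttbr)
{
    return extract64(old_ttbr ^ new_ttbr, 48, 16) != 0;
}
#endif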
   3758
   3759static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3760                                    uint64_t value)
   3761{
   3762    /*
   3763     * If we are running with E2&0 regime, then an ASID is active.
   3764     * Flush if that might be changing.  Note we're not checking
    3765     * TCR_EL2.A1 to know whether this is really the TTBRx_EL2 that
    3766     * holds the active ASID; we only check the field that might hold it.
   3767     */
   3768    if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
   3769        (arm_hcr_el2_eff(env) & HCR_E2H)) {
   3770        uint16_t mask = ARMMMUIdxBit_E20_2 |
   3771                        ARMMMUIdxBit_E20_2_PAN |
   3772                        ARMMMUIdxBit_E20_0;
   3773
   3774        if (arm_is_secure_below_el3(env)) {
   3775            mask >>= ARM_MMU_IDX_A_NS;
   3776        }
   3777
   3778        tlb_flush_by_mmuidx(env_cpu(env), mask);
   3779    }
   3780    raw_write(env, ri, value);
   3781}
   3782
   3783static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3784                        uint64_t value)
   3785{
   3786    ARMCPU *cpu = env_archcpu(env);
   3787    CPUState *cs = CPU(cpu);
   3788
   3789    /*
    3790     * A change in VMID for the stage 2 page table invalidates
    3791     * the combined stage 1&2 TLBs (EL10_1 and EL10_0).
   3792     */
   3793    if (raw_read(env, ri) != value) {
   3794        uint16_t mask = ARMMMUIdxBit_E10_1 |
   3795                        ARMMMUIdxBit_E10_1_PAN |
   3796                        ARMMMUIdxBit_E10_0;
   3797
   3798        if (arm_is_secure_below_el3(env)) {
   3799            mask >>= ARM_MMU_IDX_A_NS;
   3800        }
   3801
   3802        tlb_flush_by_mmuidx(cs, mask);
   3803        raw_write(env, ri, value);
   3804    }
   3805}
   3806
   3807static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
   3808    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
   3809      .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
   3810      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
   3811                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
   3812    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
   3813      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
   3814      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
   3815                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
   3816    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
   3817      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
   3818      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
   3819                             offsetof(CPUARMState, cp15.dfar_ns) } },
   3820    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
   3821      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
   3822      .access = PL1_RW, .accessfn = access_tvm_trvm,
   3823      .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
   3824      .resetvalue = 0, },
   3825    REGINFO_SENTINEL
   3826};
   3827
   3828static const ARMCPRegInfo vmsa_cp_reginfo[] = {
   3829    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
   3830      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
   3831      .access = PL1_RW, .accessfn = access_tvm_trvm,
   3832      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
   3833    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
   3834      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
   3835      .access = PL1_RW, .accessfn = access_tvm_trvm,
   3836      .writefn = vmsa_ttbr_write, .resetvalue = 0,
   3837      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
   3838                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
   3839    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
   3840      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
   3841      .access = PL1_RW, .accessfn = access_tvm_trvm,
   3842      .writefn = vmsa_ttbr_write, .resetvalue = 0,
   3843      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
   3844                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
   3845    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
   3846      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
   3847      .access = PL1_RW, .accessfn = access_tvm_trvm,
   3848      .writefn = vmsa_tcr_el12_write,
   3849      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
   3850      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
   3851    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
   3852      .access = PL1_RW, .accessfn = access_tvm_trvm,
   3853      .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
   3854      .raw_writefn = vmsa_ttbcr_raw_write,
   3855      /* No offsetoflow32 -- pass the entire TCR to writefn/raw_writefn. */
   3856      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.tcr_el[3]),
   3857                             offsetof(CPUARMState, cp15.tcr_el[1])} },
   3858    REGINFO_SENTINEL
   3859};
   3860
    3861/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
    3862 * the QEMU TLBs or adjusting the cached masks.
   3863 */
   3864static const ARMCPRegInfo ttbcr2_reginfo = {
   3865    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
   3866    .access = PL1_RW, .accessfn = access_tvm_trvm,
   3867    .type = ARM_CP_ALIAS,
   3868    .bank_fieldoffsets = {
   3869        offsetofhigh32(CPUARMState, cp15.tcr_el[3].raw_tcr),
   3870        offsetofhigh32(CPUARMState, cp15.tcr_el[1].raw_tcr),
   3871    },
   3872};
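
/*
 * A minimal sketch of the aliasing above: TTBCR2 is the AArch32 view of
 * TCR_EL1[63:32], so offsetofhigh32() makes a 32-bit write land in the
 * top half of raw_tcr, matching how vmsa_ttbcr_write() preserves that
 * half with deposit64().  Compiled out.
 */
#if 0
static uint64_t example_ttbcr2_merge(uint64_t raw_tcr, uint32_t ttbcr2)
{
    return deposit64(raw_tcr, 32, 32, ttbcr2);  /* replace bits [63:32] */
}
#endif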
   3873
   3874static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3875                                uint64_t value)
   3876{
   3877    env->cp15.c15_ticonfig = value & 0xe7;
   3878    /* The OS_TYPE bit in this register changes the reported CPUID! */
   3879    env->cp15.c0_cpuid = (value & (1 << 5)) ?
   3880        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
   3881}
   3882
   3883static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3884                                uint64_t value)
   3885{
   3886    env->cp15.c15_threadid = value & 0xffff;
   3887}
   3888
   3889static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3890                           uint64_t value)
   3891{
   3892    /* Wait-for-interrupt (deprecated) */
   3893    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
   3894}
   3895
   3896static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3897                                  uint64_t value)
   3898{
   3899    /* On OMAP there are registers indicating the max/min index of dcache lines
   3900     * containing a dirty line; cache flush operations have to reset these.
   3901     */
   3902    env->cp15.c15_i_max = 0x000;
   3903    env->cp15.c15_i_min = 0xff0;
   3904}
   3905
   3906static const ARMCPRegInfo omap_cp_reginfo[] = {
   3907    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
   3908      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
   3909      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
   3910      .resetvalue = 0, },
   3911    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
   3912      .access = PL1_RW, .type = ARM_CP_NOP },
   3913    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
   3914      .access = PL1_RW,
   3915      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
   3916      .writefn = omap_ticonfig_write },
   3917    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
   3918      .access = PL1_RW,
   3919      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
   3920    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
   3921      .access = PL1_RW, .resetvalue = 0xff0,
   3922      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
   3923    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
   3924      .access = PL1_RW,
   3925      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
   3926      .writefn = omap_threadid_write },
   3927    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
   3928      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
   3929      .type = ARM_CP_NO_RAW,
   3930      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
   3931    /* TODO: Peripheral port remap register:
   3932     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
   3933     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
   3934     * when MMU is off.
   3935     */
   3936    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
   3937      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
   3938      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
   3939      .writefn = omap_cachemaint_write },
   3940    { .name = "C9", .cp = 15, .crn = 9,
   3941      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
   3942      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
   3943    REGINFO_SENTINEL
   3944};
   3945
   3946static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3947                              uint64_t value)
   3948{
   3949    env->cp15.c15_cpar = value & 0x3fff;
   3950}
   3951
   3952static const ARMCPRegInfo xscale_cp_reginfo[] = {
   3953    { .name = "XSCALE_CPAR",
   3954      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
   3955      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
   3956      .writefn = xscale_cpar_write, },
   3957    { .name = "XSCALE_AUXCR",
   3958      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
   3959      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
   3960      .resetvalue = 0, },
    3961    /* XScale-specific cache lockdown: since we have no cache we NOP these
   3962     * and hope the guest does not really rely on cache behaviour.
   3963     */
   3964    { .name = "XSCALE_LOCK_ICACHE_LINE",
   3965      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
   3966      .access = PL1_W, .type = ARM_CP_NOP },
   3967    { .name = "XSCALE_UNLOCK_ICACHE",
   3968      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
   3969      .access = PL1_W, .type = ARM_CP_NOP },
   3970    { .name = "XSCALE_DCACHE_LOCK",
   3971      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
   3972      .access = PL1_RW, .type = ARM_CP_NOP },
   3973    { .name = "XSCALE_UNLOCK_DCACHE",
   3974      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
   3975      .access = PL1_W, .type = ARM_CP_NOP },
   3976    REGINFO_SENTINEL
   3977};
   3978
   3979static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    3980    /* RAZ/WI the whole crn=15 space when we don't have a more specific
   3981     * implementation of this implementation-defined space.
   3982     * Ideally this should eventually disappear in favour of actually
   3983     * implementing the correct behaviour for all cores.
   3984     */
   3985    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
   3986      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
   3987      .access = PL1_RW,
   3988      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
   3989      .resetvalue = 0 },
   3990    REGINFO_SENTINEL
   3991};
   3992
   3993static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
   3994    /* Cache status: RAZ because we have no cache so it's always clean */
   3995    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
   3996      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
   3997      .resetvalue = 0 },
   3998    REGINFO_SENTINEL
   3999};
   4000
   4001static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    4002    /* We never have a block transfer operation in progress */
   4003    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
   4004      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
   4005      .resetvalue = 0 },
   4006    /* The cache ops themselves: these all NOP for QEMU */
   4007    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
   4008      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
   4009    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
   4010      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
   4011    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
   4012      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
   4013    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
   4014      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
   4015    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
   4016      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
   4017    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
   4018      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
   4019    REGINFO_SENTINEL
   4020};
   4021
   4022static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
   4023    /* The cache test-and-clean instructions always return (1 << 30)
   4024     * to indicate that there are no dirty cache lines.
   4025     */
   4026    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
   4027      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
   4028      .resetvalue = (1 << 30) },
   4029    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
   4030      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
   4031      .resetvalue = (1 << 30) },
   4032    REGINFO_SENTINEL
   4033};
   4034
   4035static const ARMCPRegInfo strongarm_cp_reginfo[] = {
   4036    /* Ignore ReadBuffer accesses */
   4037    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
   4038      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
   4039      .access = PL1_RW, .resetvalue = 0,
   4040      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
   4041    REGINFO_SENTINEL
   4042};
   4043
   4044static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4045{
   4046    unsigned int cur_el = arm_current_el(env);
   4047
   4048    if (arm_is_el2_enabled(env) && cur_el == 1) {
   4049        return env->cp15.vpidr_el2;
   4050    }
   4051    return raw_read(env, ri);
   4052}
   4053
   4054static uint64_t mpidr_read_val(CPUARMState *env)
   4055{
   4056    ARMCPU *cpu = env_archcpu(env);
   4057    uint64_t mpidr = cpu->mp_affinity;
   4058
   4059    if (arm_feature(env, ARM_FEATURE_V7MP)) {
   4060        mpidr |= (1U << 31);
   4061        /* Cores which are uniprocessor (non-coherent)
   4062         * but still implement the MP extensions set
   4063         * bit 30. (For instance, Cortex-R5).
   4064         */
   4065        if (cpu->mp_is_up) {
            mpidr |= (1U << 30);
   4067        }
   4068    }
   4069    return mpidr;
   4070}
   4071
   4072static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4073{
   4074    unsigned int cur_el = arm_current_el(env);
   4075
   4076    if (arm_is_el2_enabled(env) && cur_el == 1) {
   4077        return env->cp15.vmpidr_el2;
   4078    }
   4079    return mpidr_read_val(env);
   4080}
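
/*
 * Illustrative sketch, not used by QEMU itself: how a consumer of the
 * value built above might take MPIDR apart. Field layout per the
 * Arm ARM: Aff3 [39:32], U [30], MT [24], Aff2 [23:16], Aff1 [15:8],
 * Aff0 [7:0]; bit 31 reads as one when the MP extensions are present.
 */
static inline void example_decode_mpidr(uint64_t mpidr)
{
    unsigned int aff3 = extract64(mpidr, 32, 8);
    unsigned int aff2 = extract64(mpidr, 16, 8);
    unsigned int aff1 = extract64(mpidr, 8, 8);
    unsigned int aff0 = extract64(mpidr, 0, 8);
    bool up = extract64(mpidr, 30, 1); /* uniprocessor, e.g. Cortex-R5 */

    qemu_printf("Aff3.Aff2.Aff1.Aff0 = %u.%u.%u.%u%s\n",
                aff3, aff2, aff1, aff0, up ? " (UP)" : "");
}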
   4081
   4082static const ARMCPRegInfo lpae_cp_reginfo[] = {
   4083    /* NOP AMAIR0/1 */
   4084    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
   4085      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
   4086      .access = PL1_RW, .accessfn = access_tvm_trvm,
   4087      .type = ARM_CP_CONST, .resetvalue = 0 },
   4088    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
   4089    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
   4090      .access = PL1_RW, .accessfn = access_tvm_trvm,
   4091      .type = ARM_CP_CONST, .resetvalue = 0 },
   4092    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
   4093      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
   4094      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
   4095                             offsetof(CPUARMState, cp15.par_ns)} },
   4096    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
   4097      .access = PL1_RW, .accessfn = access_tvm_trvm,
   4098      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
   4099      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
   4100                             offsetof(CPUARMState, cp15.ttbr0_ns) },
   4101      .writefn = vmsa_ttbr_write, },
   4102    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
   4103      .access = PL1_RW, .accessfn = access_tvm_trvm,
   4104      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
   4105      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
   4106                             offsetof(CPUARMState, cp15.ttbr1_ns) },
   4107      .writefn = vmsa_ttbr_write, },
   4108    REGINFO_SENTINEL
   4109};
   4110
   4111static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4112{
   4113    return vfp_get_fpcr(env);
   4114}
   4115
   4116static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4117                            uint64_t value)
   4118{
   4119    vfp_set_fpcr(env, value);
   4120}
   4121
   4122static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4123{
   4124    return vfp_get_fpsr(env);
   4125}
   4126
   4127static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4128                            uint64_t value)
   4129{
   4130    vfp_set_fpsr(env, value);
   4131}
   4132
   4133static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
   4134                                       bool isread)
   4135{
   4136    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
   4137        return CP_ACCESS_TRAP;
   4138    }
   4139    return CP_ACCESS_OK;
   4140}
   4141
   4142static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4143                            uint64_t value)
   4144{
   4145    env->daif = value & PSTATE_DAIF;
   4146}
   4147
   4148static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4149{
   4150    return env->pstate & PSTATE_PAN;
   4151}
   4152
   4153static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4154                           uint64_t value)
   4155{
   4156    env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
   4157}
   4158
   4159static const ARMCPRegInfo pan_reginfo = {
   4160    .name = "PAN", .state = ARM_CP_STATE_AA64,
   4161    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
   4162    .type = ARM_CP_NO_RAW, .access = PL1_RW,
   4163    .readfn = aa64_pan_read, .writefn = aa64_pan_write
   4164};
   4165
   4166static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4167{
   4168    return env->pstate & PSTATE_UAO;
   4169}
   4170
   4171static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4172                           uint64_t value)
   4173{
   4174    env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
   4175}
   4176
   4177static const ARMCPRegInfo uao_reginfo = {
   4178    .name = "UAO", .state = ARM_CP_STATE_AA64,
   4179    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
   4180    .type = ARM_CP_NO_RAW, .access = PL1_RW,
   4181    .readfn = aa64_uao_read, .writefn = aa64_uao_write
   4182};
   4183
   4184static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4185{
   4186    return env->pstate & PSTATE_DIT;
   4187}
   4188
   4189static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4190                           uint64_t value)
   4191{
   4192    env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
   4193}
   4194
   4195static const ARMCPRegInfo dit_reginfo = {
   4196    .name = "DIT", .state = ARM_CP_STATE_AA64,
   4197    .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
   4198    .type = ARM_CP_NO_RAW, .access = PL0_RW,
   4199    .readfn = aa64_dit_read, .writefn = aa64_dit_write
   4200};
   4201
   4202static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4203{
   4204    return env->pstate & PSTATE_SSBS;
   4205}
   4206
   4207static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4208                           uint64_t value)
   4209{
   4210    env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
   4211}
   4212
   4213static const ARMCPRegInfo ssbs_reginfo = {
   4214    .name = "SSBS", .state = ARM_CP_STATE_AA64,
   4215    .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
   4216    .type = ARM_CP_NO_RAW, .access = PL0_RW,
   4217    .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
   4218};
   4219
   4220static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
   4221                                              const ARMCPRegInfo *ri,
   4222                                              bool isread)
   4223{
   4224    /* Cache invalidate/clean to Point of Coherency or Persistence...  */
   4225    switch (arm_current_el(env)) {
   4226    case 0:
   4227        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
   4228        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
   4229            return CP_ACCESS_TRAP;
   4230        }
   4231        /* fall through */
   4232    case 1:
   4233        /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set.  */
   4234        if (arm_hcr_el2_eff(env) & HCR_TPCP) {
   4235            return CP_ACCESS_TRAP_EL2;
   4236        }
   4237        break;
   4238    }
   4239    return CP_ACCESS_OK;
   4240}
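
/*
 * Summary of the checks above (illustrative; assumes no other traps
 * apply):
 *
 *   EL0, SCTLR.UCI == 0                     -> UNDEF
 *   EL0, SCTLR.UCI == 1, HCR_EL2.TPCP == 1  -> trap to EL2
 *   EL1, HCR_EL2.TPCP == 1                  -> trap to EL2
 *   EL2 and EL3                             -> access OK
 */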
   4241
   4242static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env,
   4243                                              const ARMCPRegInfo *ri,
   4244                                              bool isread)
   4245{
   4246    /* Cache invalidate/clean to Point of Unification... */
   4247    switch (arm_current_el(env)) {
   4248    case 0:
   4249        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
   4250        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
   4251            return CP_ACCESS_TRAP;
   4252        }
   4253        /* fall through */
   4254    case 1:
   4255        /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set.  */
   4256        if (arm_hcr_el2_eff(env) & HCR_TPU) {
   4257            return CP_ACCESS_TRAP_EL2;
   4258        }
   4259        break;
   4260    }
   4261    return CP_ACCESS_OK;
   4262}
   4263
   4264/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
   4265 * Page D4-1736 (DDI0487A.b)
   4266 */
   4267
   4268static int vae1_tlbmask(CPUARMState *env)
   4269{
   4270    uint64_t hcr = arm_hcr_el2_eff(env);
   4271    uint16_t mask;
   4272
   4273    if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
   4274        mask = ARMMMUIdxBit_E20_2 |
   4275               ARMMMUIdxBit_E20_2_PAN |
   4276               ARMMMUIdxBit_E20_0;
   4277    } else {
   4278        mask = ARMMMUIdxBit_E10_1 |
   4279               ARMMMUIdxBit_E10_1_PAN |
   4280               ARMMMUIdxBit_E10_0;
   4281    }
   4282
   4283    if (arm_is_secure_below_el3(env)) {
   4284        mask >>= ARM_MMU_IDX_A_NS;
   4285    }
   4286
   4287    return mask;
   4288}
   4289
   4290/* Return 56 if TBI is enabled, 64 otherwise. */
   4291static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
   4292                              uint64_t addr)
   4293{
   4294    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
   4295    int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
   4296    int select = extract64(addr, 55, 1);
   4297
   4298    return (tbi >> select) & 1 ? 56 : 64;
   4299}
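
/*
 * Worked example (illustrative): a tagged kernel pointer such as
 * 0xf2ffffc000001000 keeps bit 55 set even though the top byte holds
 * a tag, so select = 1 and (tbi >> 1) & 1 is consulted. With TBI1
 * enabled the function returns 56: VA[63:56] never participate in TLB
 * matching, which is what lets the tag byte vary freely. With TBI
 * disabled all 64 bits are significant.
 */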
   4300
   4301static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
   4302{
   4303    uint64_t hcr = arm_hcr_el2_eff(env);
   4304    ARMMMUIdx mmu_idx;
   4305
   4306    /* Only the regime of the mmu_idx below is significant. */
   4307    if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
   4308        mmu_idx = ARMMMUIdx_E20_0;
   4309    } else {
   4310        mmu_idx = ARMMMUIdx_E10_0;
   4311    }
   4312
   4313    if (arm_is_secure_below_el3(env)) {
   4314        mmu_idx &= ~ARM_MMU_IDX_A_NS;
   4315    }
   4316
   4317    return tlbbits_for_regime(env, mmu_idx, addr);
   4318}
   4319
   4320static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4321                                      uint64_t value)
   4322{
   4323    CPUState *cs = env_cpu(env);
   4324    int mask = vae1_tlbmask(env);
   4325
   4326    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
   4327}
   4328
   4329static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4330                                    uint64_t value)
   4331{
   4332    CPUState *cs = env_cpu(env);
   4333    int mask = vae1_tlbmask(env);
   4334
   4335    if (tlb_force_broadcast(env)) {
   4336        tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
   4337    } else {
   4338        tlb_flush_by_mmuidx(cs, mask);
   4339    }
   4340}
   4341
   4342static int alle1_tlbmask(CPUARMState *env)
   4343{
   4344    /*
   4345     * Note that the 'ALL' scope must invalidate both stage 1 and
   4346     * stage 2 translations, whereas most other scopes only invalidate
   4347     * stage 1 translations.
   4348     */
   4349    if (arm_is_secure_below_el3(env)) {
   4350        return ARMMMUIdxBit_SE10_1 |
   4351               ARMMMUIdxBit_SE10_1_PAN |
   4352               ARMMMUIdxBit_SE10_0;
   4353    } else {
   4354        return ARMMMUIdxBit_E10_1 |
   4355               ARMMMUIdxBit_E10_1_PAN |
   4356               ARMMMUIdxBit_E10_0;
   4357    }
   4358}
   4359
   4360static int e2_tlbmask(CPUARMState *env)
   4361{
   4362    if (arm_is_secure_below_el3(env)) {
   4363        return ARMMMUIdxBit_SE20_0 |
   4364               ARMMMUIdxBit_SE20_2 |
   4365               ARMMMUIdxBit_SE20_2_PAN |
   4366               ARMMMUIdxBit_SE2;
   4367    } else {
   4368        return ARMMMUIdxBit_E20_0 |
   4369               ARMMMUIdxBit_E20_2 |
   4370               ARMMMUIdxBit_E20_2_PAN |
   4371               ARMMMUIdxBit_E2;
   4372    }
   4373}
   4374
   4375static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4376                                  uint64_t value)
   4377{
   4378    CPUState *cs = env_cpu(env);
   4379    int mask = alle1_tlbmask(env);
   4380
   4381    tlb_flush_by_mmuidx(cs, mask);
   4382}
   4383
   4384static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4385                                  uint64_t value)
   4386{
   4387    CPUState *cs = env_cpu(env);
   4388    int mask = e2_tlbmask(env);
   4389
   4390    tlb_flush_by_mmuidx(cs, mask);
   4391}
   4392
   4393static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4394                                  uint64_t value)
   4395{
   4396    ARMCPU *cpu = env_archcpu(env);
   4397    CPUState *cs = CPU(cpu);
   4398
   4399    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
   4400}
   4401
   4402static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4403                                    uint64_t value)
   4404{
   4405    CPUState *cs = env_cpu(env);
   4406    int mask = alle1_tlbmask(env);
   4407
   4408    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
   4409}
   4410
   4411static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4412                                    uint64_t value)
   4413{
   4414    CPUState *cs = env_cpu(env);
   4415    int mask = e2_tlbmask(env);
   4416
   4417    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
   4418}
   4419
   4420static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4421                                    uint64_t value)
   4422{
   4423    CPUState *cs = env_cpu(env);
   4424
   4425    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
   4426}
   4427
   4428static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4429                                 uint64_t value)
   4430{
   4431    /* Invalidate by VA, EL2
   4432     * Currently handles both VAE2 and VALE2, since we don't support
   4433     * flush-last-level-only.
   4434     */
   4435    CPUState *cs = env_cpu(env);
   4436    int mask = e2_tlbmask(env);
   4437    uint64_t pageaddr = sextract64(value << 12, 0, 56);
   4438
   4439    tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
   4440}
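
/*
 * Worked example for the pageaddr computation (illustrative): the
 * TLBI argument carries VA[55:12] in bits [43:0], so an all-ones
 * argument of 0xfffffffffff gives
 *   value << 12            = 0x00fffffffffff000
 *   sextract64(..., 0, 56) = 0xfffffffffffff000
 * i.e. bit 55 is sign-extended, so upper-half (TTBR1) addresses come
 * back in canonical form.
 */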
   4441
   4442static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4443                                 uint64_t value)
   4444{
   4445    /* Invalidate by VA, EL3
   4446     * Currently handles both VAE3 and VALE3, since we don't support
   4447     * flush-last-level-only.
   4448     */
   4449    ARMCPU *cpu = env_archcpu(env);
   4450    CPUState *cs = CPU(cpu);
   4451    uint64_t pageaddr = sextract64(value << 12, 0, 56);
   4452
   4453    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
   4454}
   4455
   4456static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4457                                   uint64_t value)
   4458{
   4459    CPUState *cs = env_cpu(env);
   4460    int mask = vae1_tlbmask(env);
   4461    uint64_t pageaddr = sextract64(value << 12, 0, 56);
   4462    int bits = vae1_tlbbits(env, pageaddr);
   4463
   4464    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
   4465}
   4466
   4467static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4468                                 uint64_t value)
   4469{
   4470    /* Invalidate by VA, EL1&0 (AArch64 version).
   4471     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
   4472     * since we don't support flush-for-specific-ASID-only or
   4473     * flush-last-level-only.
   4474     */
   4475    CPUState *cs = env_cpu(env);
   4476    int mask = vae1_tlbmask(env);
   4477    uint64_t pageaddr = sextract64(value << 12, 0, 56);
   4478    int bits = vae1_tlbbits(env, pageaddr);
   4479
   4480    if (tlb_force_broadcast(env)) {
   4481        tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
   4482    } else {
   4483        tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
   4484    }
   4485}
   4486
   4487static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4488                                   uint64_t value)
   4489{
   4490    CPUState *cs = env_cpu(env);
   4491    uint64_t pageaddr = sextract64(value << 12, 0, 56);
   4492    bool secure = arm_is_secure_below_el3(env);
   4493    int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
   4494    int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
   4495                                  pageaddr);
   4496
   4497    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
   4498}
   4499
   4500static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4501                                   uint64_t value)
   4502{
   4503    CPUState *cs = env_cpu(env);
   4504    uint64_t pageaddr = sextract64(value << 12, 0, 56);
   4505    int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
   4506
   4507    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
   4508                                                  ARMMMUIdxBit_SE3, bits);
   4509}
   4510
   4511#ifdef TARGET_AARCH64
   4512static uint64_t tlbi_aa64_range_get_length(CPUARMState *env,
   4513                                           uint64_t value)
   4514{
   4515    unsigned int page_shift;
   4516    unsigned int page_size_granule;
   4517    uint64_t num;
   4518    uint64_t scale;
   4519    uint64_t exponent;
   4520    uint64_t length;
   4521
    num = extract64(value, 39, 5);
   4523    scale = extract64(value, 44, 2);
   4524    page_size_granule = extract64(value, 46, 2);
   4525
    if (page_size_granule == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid page size granule %d\n",
                      page_size_granule);
        return 0;
    }

    /* TG encodes the granule: 0b01 = 4K, 0b10 = 16K, 0b11 = 64K. */
    page_shift = (page_size_granule - 1) * 2 + 12;
   4533
   4534    exponent = (5 * scale) + 1;
   4535    length = (num + 1) << (exponent + page_shift);
   4536
   4537    return length;
   4538}
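
/*
 * Worked example for the decode above (illustrative): a 4K granule is
 * TG = 0b01, so page_shift = 12; with SCALE = 1 and NUM = 3 the
 * exponent is 5 * 1 + 1 = 6 and the resulting range length is
 * (3 + 1) << (6 + 12) = 1MiB.
 */
QEMU_BUILD_BUG_ON(((3 + 1) << ((5 * 1 + 1) + 12)) != 1 * MiB);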
   4539
static uint64_t tlbi_aa64_range_get_base(CPUARMState *env, uint64_t value,
                                         bool two_ranges)
{
    /* TODO: ARMv8.7 FEAT_LPA2 */
    unsigned int page_size_granule = extract64(value, 46, 2);
    unsigned int page_shift = 12;
    uint64_t pageaddr;

    /*
     * BaseADDR is scaled by the translation granule encoded in TG,
     * not by TARGET_PAGE_BITS. An invalid TG of 0 already decodes to
     * a zero length above, so the 4K fallback only affects a flush
     * that is effectively discarded anyway.
     */
    if (page_size_granule != 0) {
        page_shift = (page_size_granule - 1) * 2 + 12;
    }

    if (two_ranges) {
        pageaddr = sextract64(value, 0, 37) << page_shift;
    } else {
        pageaddr = extract64(value, 0, 37) << page_shift;
    }

    return pageaddr;
}
   4554
   4555static void do_rvae_write(CPUARMState *env, uint64_t value,
   4556                          int idxmap, bool synced)
   4557{
   4558    ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
   4559    bool two_ranges = regime_has_2_ranges(one_idx);
   4560    uint64_t baseaddr, length;
   4561    int bits;
   4562
   4563    baseaddr = tlbi_aa64_range_get_base(env, value, two_ranges);
   4564    length = tlbi_aa64_range_get_length(env, value);
   4565    bits = tlbbits_for_regime(env, one_idx, baseaddr);
   4566
   4567    if (synced) {
   4568        tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
   4569                                                  baseaddr,
   4570                                                  length,
   4571                                                  idxmap,
   4572                                                  bits);
   4573    } else {
   4574        tlb_flush_range_by_mmuidx(env_cpu(env), baseaddr,
   4575                                  length, idxmap, bits);
   4576    }
   4577}
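
/*
 * Usage sketch (illustrative, hypothetical register value): a guest
 * "TLBI RVAE1, Xt" with TG = 0b01, SCALE = 1, NUM = 3 and
 * BaseADDR = 0x40000 decodes via the helpers above to a 1MiB
 * invalidation starting at 0x40000 << 12 = 0x40000000, applied to
 * the EL1&0 mmu indexes chosen by vae1_tlbmask().
 */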
   4578
   4579static void tlbi_aa64_rvae1_write(CPUARMState *env,
   4580                                  const ARMCPRegInfo *ri,
   4581                                  uint64_t value)
   4582{
   4583    /*
   4584     * Invalidate by VA range, EL1&0.
   4585     * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
   4586     * since we don't support flush-for-specific-ASID-only or
   4587     * flush-last-level-only.
   4588     */
   4589
   4590    do_rvae_write(env, value, vae1_tlbmask(env),
   4591                  tlb_force_broadcast(env));
   4592}
   4593
   4594static void tlbi_aa64_rvae1is_write(CPUARMState *env,
   4595                                    const ARMCPRegInfo *ri,
   4596                                    uint64_t value)
   4597{
   4598    /*
   4599     * Invalidate by VA range, Inner/Outer Shareable EL1&0.
   4600     * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
   4601     * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
   4602     * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
   4603     * shareable specific flushes.
   4604     */
   4605
   4606    do_rvae_write(env, value, vae1_tlbmask(env), true);
   4607}
   4608
   4609static int vae2_tlbmask(CPUARMState *env)
   4610{
   4611    return (arm_is_secure_below_el3(env)
   4612            ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
   4613}
   4614
   4615static void tlbi_aa64_rvae2_write(CPUARMState *env,
   4616                                  const ARMCPRegInfo *ri,
   4617                                  uint64_t value)
   4618{
   4619    /*
   4620     * Invalidate by VA range, EL2.
   4621     * Currently handles all of RVAE2 and RVALE2,
   4622     * since we don't support flush-for-specific-ASID-only or
   4623     * flush-last-level-only.
   4624     */
   4625
    do_rvae_write(env, value, vae2_tlbmask(env),
                  tlb_force_broadcast(env));
}
   4631
   4632static void tlbi_aa64_rvae2is_write(CPUARMState *env,
   4633                                    const ARMCPRegInfo *ri,
   4634                                    uint64_t value)
   4635{
   4636    /*
   4637     * Invalidate by VA range, Inner/Outer Shareable, EL2.
   4638     * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
   4639     * since we don't support flush-for-specific-ASID-only,
   4640     * flush-last-level-only or inner/outer shareable specific flushes.
   4641     */
   4642
    do_rvae_write(env, value, vae2_tlbmask(env), true);
}
   4646
   4647static void tlbi_aa64_rvae3_write(CPUARMState *env,
   4648                                  const ARMCPRegInfo *ri,
   4649                                  uint64_t value)
   4650{
   4651    /*
   4652     * Invalidate by VA range, EL3.
   4653     * Currently handles all of RVAE3 and RVALE3,
   4654     * since we don't support flush-for-specific-ASID-only or
   4655     * flush-last-level-only.
   4656     */
   4657
   4658    do_rvae_write(env, value, ARMMMUIdxBit_SE3,
   4659                  tlb_force_broadcast(env));
   4660}
   4661
   4662static void tlbi_aa64_rvae3is_write(CPUARMState *env,
   4663                                    const ARMCPRegInfo *ri,
   4664                                    uint64_t value)
   4665{
   4666    /*
   4667     * Invalidate by VA range, EL3, Inner/Outer Shareable.
   4668     * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
   4669     * since we don't support flush-for-specific-ASID-only,
   4670     * flush-last-level-only or inner/outer specific flushes.
   4671     */
   4672
   4673    do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
   4674}
   4675#endif
   4676
   4677static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
   4678                                      bool isread)
   4679{
   4680    int cur_el = arm_current_el(env);
   4681
   4682    if (cur_el < 2) {
   4683        uint64_t hcr = arm_hcr_el2_eff(env);
   4684
   4685        if (cur_el == 0) {
   4686            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
   4687                if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
   4688                    return CP_ACCESS_TRAP_EL2;
   4689                }
   4690            } else {
   4691                if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
   4692                    return CP_ACCESS_TRAP;
   4693                }
   4694                if (hcr & HCR_TDZ) {
   4695                    return CP_ACCESS_TRAP_EL2;
   4696                }
   4697            }
   4698        } else if (hcr & HCR_TDZ) {
   4699            return CP_ACCESS_TRAP_EL2;
   4700        }
   4701    }
   4702    return CP_ACCESS_OK;
   4703}
   4704
   4705static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4706{
   4707    ARMCPU *cpu = env_archcpu(env);
   4708    int dzp_bit = 1 << 4;
   4709
   4710    /* DZP indicates whether DC ZVA access is allowed */
   4711    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
   4712        dzp_bit = 0;
   4713    }
   4714    return cpu->dcz_blocksize | dzp_bit;
   4715}
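
/*
 * Worked example (illustrative): DCZID_EL0.BS is the log2 of the
 * DC ZVA block size in 32-bit words, so the byte size is 4 << BS.
 * QEMU's Cortex-A57 model sets dcz_blocksize = 4, advertising
 * 64-byte blocks; a set DZP bit (bit 4) tells EL0 that DC ZVA is
 * currently prohibited.
 */
QEMU_BUILD_BUG_ON((4 << 4) != 64);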
   4716
   4717static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
   4718                                    bool isread)
   4719{
   4720    if (!(env->pstate & PSTATE_SP)) {
   4721        /* Access to SP_EL0 is undefined if it's being used as
   4722         * the stack pointer.
   4723         */
   4724        return CP_ACCESS_TRAP_UNCATEGORIZED;
   4725    }
   4726    return CP_ACCESS_OK;
   4727}
   4728
   4729static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4730{
   4731    return env->pstate & PSTATE_SP;
   4732}
   4733
   4734static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
   4735{
   4736    update_spsel(env, val);
   4737}
   4738
   4739static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4740                        uint64_t value)
   4741{
   4742    ARMCPU *cpu = env_archcpu(env);
   4743
   4744    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
   4745        /* M bit is RAZ/WI for PMSA with no MPU implemented */
   4746        value &= ~SCTLR_M;
   4747    }
   4748
   4749    /* ??? Lots of these bits are not implemented.  */
   4750
   4751    if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
   4752        if (ri->opc1 == 6) { /* SCTLR_EL3 */
   4753            value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
   4754        } else {
   4755            value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
   4756                       SCTLR_ATA0 | SCTLR_ATA);
   4757        }
   4758    }
   4759
   4760    if (raw_read(env, ri) == value) {
   4761        /* Skip the TLB flush if nothing actually changed; Linux likes
   4762         * to do a lot of pointless SCTLR writes.
   4763         */
   4764        return;
   4765    }
   4766
   4767    raw_write(env, ri, value);
   4768
   4769    /* This may enable/disable the MMU, so do a TLB flush.  */
   4770    tlb_flush(CPU(cpu));
   4771
   4772    if (ri->type & ARM_CP_SUPPRESS_TB_END) {
   4773        /*
   4774         * Normally we would always end the TB on an SCTLR write; see the
   4775         * comment in ARMCPRegInfo sctlr initialization below for why Xscale
   4776         * is special.  Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
   4777         * of hflags from the translator, so do it here.
   4778         */
   4779        arm_rebuild_hflags(env);
   4780    }
   4781}
   4782
   4783static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
   4784                                     bool isread)
   4785{
   4786    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
   4787        return CP_ACCESS_TRAP_FP_EL2;
   4788    }
   4789    if (env->cp15.cptr_el[3] & CPTR_TFP) {
   4790        return CP_ACCESS_TRAP_FP_EL3;
   4791    }
   4792    return CP_ACCESS_OK;
   4793}
   4794
   4795static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4796                       uint64_t value)
   4797{
   4798    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
   4799}
   4800
   4801static const ARMCPRegInfo v8_cp_reginfo[] = {
   4802    /* Minimal set of EL0-visible registers. This will need to be expanded
   4803     * significantly for system emulation of AArch64 CPUs.
   4804     */
   4805    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
   4806      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
   4807      .access = PL0_RW, .type = ARM_CP_NZCV },
   4808    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
   4809      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
   4810      .type = ARM_CP_NO_RAW,
   4811      .access = PL0_RW, .accessfn = aa64_daif_access,
   4812      .fieldoffset = offsetof(CPUARMState, daif),
   4813      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
   4814    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
   4815      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
   4816      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
   4817      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
   4818    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
   4819      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
   4820      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
   4821      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
   4822    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
   4823      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
   4824      .access = PL0_R, .type = ARM_CP_NO_RAW,
   4825      .readfn = aa64_dczid_read },
   4826    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
   4827      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
   4828      .access = PL0_W, .type = ARM_CP_DC_ZVA,
   4829#ifndef CONFIG_USER_ONLY
   4830      /* Avoid overhead of an access check that always passes in user-mode */
   4831      .accessfn = aa64_zva_access,
   4832#endif
   4833    },
   4834    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
   4835      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
   4836      .access = PL1_R, .type = ARM_CP_CURRENTEL },
   4837    /* Cache ops: all NOPs since we don't emulate caches */
   4838    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
   4839      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
   4840      .access = PL1_W, .type = ARM_CP_NOP,
   4841      .accessfn = aa64_cacheop_pou_access },
   4842    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
   4843      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
   4844      .access = PL1_W, .type = ARM_CP_NOP,
   4845      .accessfn = aa64_cacheop_pou_access },
   4846    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
   4847      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
   4848      .access = PL0_W, .type = ARM_CP_NOP,
   4849      .accessfn = aa64_cacheop_pou_access },
   4850    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
   4851      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
   4852      .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
   4853      .type = ARM_CP_NOP },
   4854    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
   4855      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
   4856      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
   4857    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
   4858      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
   4859      .access = PL0_W, .type = ARM_CP_NOP,
   4860      .accessfn = aa64_cacheop_poc_access },
   4861    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
   4862      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
   4863      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
   4864    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
   4865      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
   4866      .access = PL0_W, .type = ARM_CP_NOP,
   4867      .accessfn = aa64_cacheop_pou_access },
   4868    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
   4869      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
   4870      .access = PL0_W, .type = ARM_CP_NOP,
   4871      .accessfn = aa64_cacheop_poc_access },
   4872    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
   4873      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
   4874      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
   4875    /* TLBI operations */
   4876    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
   4877      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
   4878      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4879      .writefn = tlbi_aa64_vmalle1is_write },
   4880    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
   4881      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
   4882      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4883      .writefn = tlbi_aa64_vae1is_write },
   4884    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
   4885      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
   4886      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4887      .writefn = tlbi_aa64_vmalle1is_write },
   4888    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
   4889      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
   4890      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4891      .writefn = tlbi_aa64_vae1is_write },
   4892    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
   4893      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
   4894      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4895      .writefn = tlbi_aa64_vae1is_write },
   4896    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
   4897      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
   4898      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4899      .writefn = tlbi_aa64_vae1is_write },
   4900    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
   4901      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
   4902      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4903      .writefn = tlbi_aa64_vmalle1_write },
   4904    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
   4905      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
   4906      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4907      .writefn = tlbi_aa64_vae1_write },
   4908    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
   4909      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
   4910      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4911      .writefn = tlbi_aa64_vmalle1_write },
   4912    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
   4913      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
   4914      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4915      .writefn = tlbi_aa64_vae1_write },
   4916    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
   4917      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
   4918      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4919      .writefn = tlbi_aa64_vae1_write },
   4920    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
   4921      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
   4922      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4923      .writefn = tlbi_aa64_vae1_write },
   4924    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
   4925      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
   4926      .access = PL2_W, .type = ARM_CP_NOP },
   4927    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
   4928      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
   4929      .access = PL2_W, .type = ARM_CP_NOP },
   4930    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
   4931      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
   4932      .access = PL2_W, .type = ARM_CP_NO_RAW,
   4933      .writefn = tlbi_aa64_alle1is_write },
   4934    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
   4935      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
   4936      .access = PL2_W, .type = ARM_CP_NO_RAW,
   4937      .writefn = tlbi_aa64_alle1is_write },
   4938    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
   4939      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
   4940      .access = PL2_W, .type = ARM_CP_NOP },
   4941    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
   4942      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
   4943      .access = PL2_W, .type = ARM_CP_NOP },
   4944    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
   4945      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
   4946      .access = PL2_W, .type = ARM_CP_NO_RAW,
   4947      .writefn = tlbi_aa64_alle1_write },
   4948    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
   4949      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
   4950      .access = PL2_W, .type = ARM_CP_NO_RAW,
   4951      .writefn = tlbi_aa64_alle1is_write },
   4952#ifndef CONFIG_USER_ONLY
   4953    /* 64 bit address translation operations */
   4954    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
   4955      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
   4956      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   4957      .writefn = ats_write64 },
   4958    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
   4959      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
   4960      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   4961      .writefn = ats_write64 },
   4962    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
   4963      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
   4964      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   4965      .writefn = ats_write64 },
   4966    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
   4967      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
   4968      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   4969      .writefn = ats_write64 },
   4970    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
   4971      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
   4972      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   4973      .writefn = ats_write64 },
   4974    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
   4975      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
   4976      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   4977      .writefn = ats_write64 },
   4978    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
   4979      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
   4980      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   4981      .writefn = ats_write64 },
   4982    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
   4983      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
   4984      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   4985      .writefn = ats_write64 },
   4986    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
   4987    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
   4988      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
   4989      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   4990      .writefn = ats_write64 },
   4991    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
   4992      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
   4993      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   4994      .writefn = ats_write64 },
   4995    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
   4996      .type = ARM_CP_ALIAS,
   4997      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
   4998      .access = PL1_RW, .resetvalue = 0,
   4999      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
   5000      .writefn = par_write },
   5001#endif
   5002    /* TLB invalidate last level of translation table walk */
   5003    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
   5004      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   5005      .writefn = tlbimva_is_write },
   5006    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
   5007      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   5008      .writefn = tlbimvaa_is_write },
   5009    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
   5010      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   5011      .writefn = tlbimva_write },
   5012    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
   5013      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   5014      .writefn = tlbimvaa_write },
   5015    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
   5016      .type = ARM_CP_NO_RAW, .access = PL2_W,
   5017      .writefn = tlbimva_hyp_write },
   5018    { .name = "TLBIMVALHIS",
   5019      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
   5020      .type = ARM_CP_NO_RAW, .access = PL2_W,
   5021      .writefn = tlbimva_hyp_is_write },
   5022    { .name = "TLBIIPAS2",
   5023      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
   5024      .type = ARM_CP_NOP, .access = PL2_W },
   5025    { .name = "TLBIIPAS2IS",
   5026      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
   5027      .type = ARM_CP_NOP, .access = PL2_W },
   5028    { .name = "TLBIIPAS2L",
   5029      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
   5030      .type = ARM_CP_NOP, .access = PL2_W },
   5031    { .name = "TLBIIPAS2LIS",
   5032      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
   5033      .type = ARM_CP_NOP, .access = PL2_W },
   5034    /* 32 bit cache operations */
   5035    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
   5036      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
   5037    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
   5038      .type = ARM_CP_NOP, .access = PL1_W },
   5039    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
   5040      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
   5041    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
   5042      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
   5043    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
   5044      .type = ARM_CP_NOP, .access = PL1_W },
   5045    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
   5046      .type = ARM_CP_NOP, .access = PL1_W },
   5047    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
   5048      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
   5049    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
   5050      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   5051    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
   5052      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
   5053    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
   5054      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   5055    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
   5056      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
   5057    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
   5058      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
   5059    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
   5060      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   5061    /* MMU Domain access control / MPU write buffer control */
   5062    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
   5063      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
   5064      .writefn = dacr_write, .raw_writefn = raw_write,
   5065      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
   5066                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
   5067    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
   5068      .type = ARM_CP_ALIAS,
   5069      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
   5070      .access = PL1_RW,
   5071      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
   5072    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
   5073      .type = ARM_CP_ALIAS,
   5074      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
   5075      .access = PL1_RW,
   5076      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
   5077    /* We rely on the access checks not allowing the guest to write to the
   5078     * state field when SPSel indicates that it's being used as the stack
   5079     * pointer.
   5080     */
   5081    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
   5082      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
   5083      .access = PL1_RW, .accessfn = sp_el0_access,
   5084      .type = ARM_CP_ALIAS,
   5085      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
   5086    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
   5087      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
   5088      .access = PL2_RW, .type = ARM_CP_ALIAS,
   5089      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
   5090    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
   5091      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
   5092      .type = ARM_CP_NO_RAW,
   5093      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
   5094    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
   5095      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
   5096      .type = ARM_CP_ALIAS,
   5097      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
   5098      .access = PL2_RW, .accessfn = fpexc32_access },
   5099    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
   5100      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
   5101      .access = PL2_RW, .resetvalue = 0,
   5102      .writefn = dacr_write, .raw_writefn = raw_write,
   5103      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
   5104    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
   5105      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
   5106      .access = PL2_RW, .resetvalue = 0,
   5107      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
   5108    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
   5109      .type = ARM_CP_ALIAS,
   5110      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
   5111      .access = PL2_RW,
   5112      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
   5113    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
   5114      .type = ARM_CP_ALIAS,
   5115      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
   5116      .access = PL2_RW,
   5117      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
   5118    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
   5119      .type = ARM_CP_ALIAS,
   5120      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
   5121      .access = PL2_RW,
   5122      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
   5123    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
   5124      .type = ARM_CP_ALIAS,
   5125      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
   5126      .access = PL2_RW,
   5127      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
   5128    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
   5129      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
   5130      .resetvalue = 0,
   5131      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
   5132    { .name = "SDCR", .type = ARM_CP_ALIAS,
   5133      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
   5134      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
   5135      .writefn = sdcr_write,
   5136      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
   5137    REGINFO_SENTINEL
   5138};
   5139
   5140/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
   5141static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
   5142    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
   5143      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
   5144      .access = PL2_RW,
   5145      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
   5146    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
   5147      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
   5148      .access = PL2_RW,
   5149      .type = ARM_CP_CONST, .resetvalue = 0 },
   5150    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
   5151      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
   5152      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5153    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
   5154      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
   5155      .access = PL2_RW,
   5156      .type = ARM_CP_CONST, .resetvalue = 0 },
   5157    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
   5158      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
   5159      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5160    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
   5161      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
   5162      .access = PL2_RW, .type = ARM_CP_CONST,
   5163      .resetvalue = 0 },
   5164    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
   5165      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
   5166      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5167    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
   5168      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
   5169      .access = PL2_RW, .type = ARM_CP_CONST,
   5170      .resetvalue = 0 },
   5171    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
   5172      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
   5173      .access = PL2_RW, .type = ARM_CP_CONST,
   5174      .resetvalue = 0 },
   5175    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
   5176      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
   5177      .access = PL2_RW, .type = ARM_CP_CONST,
   5178      .resetvalue = 0 },
   5179    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
   5180      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
   5181      .access = PL2_RW, .type = ARM_CP_CONST,
   5182      .resetvalue = 0 },
   5183    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
   5184      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
   5185      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5186    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
   5187      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
   5188      .access = PL2_RW, .accessfn = access_el3_aa32ns,
   5189      .type = ARM_CP_CONST, .resetvalue = 0 },
   5190    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
   5191      .cp = 15, .opc1 = 6, .crm = 2,
   5192      .access = PL2_RW, .accessfn = access_el3_aa32ns,
   5193      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
   5194    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
   5195      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
   5196      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5197    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
   5198      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
   5199      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5200    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
   5201      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
   5202      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5203    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
   5204      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
   5205      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5206    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
   5207      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
   5208      .resetvalue = 0 },
   5209    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
   5210      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
   5211      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5212    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
   5213      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
   5214      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5215    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
   5216      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
   5217      .resetvalue = 0 },
   5218    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
   5219      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
   5220      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5221    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
   5222      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
   5223      .resetvalue = 0 },
   5224    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
   5225      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
   5226      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5227    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
   5228      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
   5229      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5230    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
   5231      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
   5232      .access = PL2_RW, .accessfn = access_tda,
   5233      .type = ARM_CP_CONST, .resetvalue = 0 },
   5234    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
   5235      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
   5236      .access = PL2_RW, .accessfn = access_el3_aa32ns,
   5237      .type = ARM_CP_CONST, .resetvalue = 0 },
   5238    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
   5239      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
   5240      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5241    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
   5242      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
   5243      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5244    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
   5245      .type = ARM_CP_CONST,
   5246      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
   5247      .access = PL2_RW, .resetvalue = 0 },
   5248    REGINFO_SENTINEL
   5249};
   5250
   5251/* Ditto, but for registers which exist in ARMv8 but not v7 */
   5252static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
   5253    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
   5254      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
   5255      .access = PL2_RW,
   5256      .type = ARM_CP_CONST, .resetvalue = 0 },
   5257    REGINFO_SENTINEL
   5258};
   5259
   5260static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
   5261{
   5262    ARMCPU *cpu = env_archcpu(env);
   5263
   5264    if (arm_feature(env, ARM_FEATURE_V8)) {
   5265        valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
   5266    } else {
   5267        valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
   5268    }
   5269
   5270    if (arm_feature(env, ARM_FEATURE_EL3)) {
   5271        valid_mask &= ~HCR_HCD;
   5272    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
   5273        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
   5274         * However, if we're using the SMC PSCI conduit then QEMU is
   5275         * effectively acting like EL3 firmware and so the guest at
   5276         * EL2 should retain the ability to prevent EL1 from being
   5277         * able to make SMC calls into the ersatz firmware, so in
   5278         * that case HCR.TSC should be read/write.
   5279         */
   5280        valid_mask &= ~HCR_TSC;
   5281    }
   5282
   5283    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
   5284        if (cpu_isar_feature(aa64_vh, cpu)) {
   5285            valid_mask |= HCR_E2H;
   5286        }
   5287        if (cpu_isar_feature(aa64_lor, cpu)) {
   5288            valid_mask |= HCR_TLOR;
   5289        }
   5290        if (cpu_isar_feature(aa64_pauth, cpu)) {
   5291            valid_mask |= HCR_API | HCR_APK;
   5292        }
   5293        if (cpu_isar_feature(aa64_mte, cpu)) {
   5294            valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
   5295        }
   5296    }
   5297
   5298    /* Clear RES0 bits.  */
   5299    value &= valid_mask;
   5300
   5301    /*
   5302     * These bits change the MMU setup:
    5303     * HCR_VM enables stage 2 translation
    5304     * HCR_PTW forbids certain page-table setups
    5305     * HCR_DC disables stage 1 and enables stage 2 translation
    5306     * HCR_DCT enables tagging on (disabled) stage 1 translation
   5307     */
   5308    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT)) {
   5309        tlb_flush(CPU(cpu));
   5310    }
   5311    env->cp15.hcr_el2 = value;
   5312
   5313    /*
   5314     * Updates to VI and VF require us to update the status of
   5315     * virtual interrupts, which are the logical OR of these bits
   5316     * and the state of the input lines from the GIC. (This requires
   5317     * that we have the iothread lock, which is done by marking the
   5318     * reginfo structs as ARM_CP_IO.)
   5319     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
   5320     * possible for it to be taken immediately, because VIRQ and
   5321     * VFIQ are masked unless running at EL0 or EL1, and HCR
   5322     * can only be written at EL2.
   5323     */
   5324    g_assert(qemu_mutex_iothread_locked());
   5325    arm_cpu_update_virq(cpu);
   5326    arm_cpu_update_vfiq(cpu);
   5327}
   5328
   5329static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
   5330{
   5331    do_hcr_write(env, value, 0);
   5332}
   5333
   5334static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
   5335                          uint64_t value)
   5336{
   5337    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
   5338    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
   5339    do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
   5340}
   5341
   5342static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
   5343                         uint64_t value)
   5344{
   5345    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
   5346    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
   5347    do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
   5348}
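
/*
 * Editorial note -- illustrative sketch, not part of the original file:
 * the AArch32 HCR/HCR2 pair aliases the single 64-bit hcr_el2 state, so
 * a write to one half must preserve the other, which is what the
 * deposit64() calls above do. Assuming hcr_el2 currently holds
 * 0x1111111122222222ULL, a write of 0xdeadbeef to HCR2 produces:
 *
 *     uint64_t merged = deposit64(0x1111111122222222ULL, 32, 32,
 *                                 0xdeadbeefULL);
 *     // merged == 0xdeadbeef22222222ULL: the low half (HCR) is kept,
 *     // and do_hcr_write() is told the low 32 bits are already valid.
 */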
   5349
   5350/*
   5351 * Return the effective value of HCR_EL2.
   5352 * Bits that are not included here:
   5353 * RW       (read from SCR_EL3.RW as needed)
   5354 */
   5355uint64_t arm_hcr_el2_eff(CPUARMState *env)
   5356{
   5357    uint64_t ret = env->cp15.hcr_el2;
   5358
   5359    if (!arm_is_el2_enabled(env)) {
   5360        /*
   5361         * "This register has no effect if EL2 is not enabled in the
   5362         * current Security state".  This is ARMv8.4-SecEL2 speak for
   5363         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
   5364         *
   5365         * Prior to that, the language was "In an implementation that
   5366         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
   5367         * as if this field is 0 for all purposes other than a direct
   5368         * read or write access of HCR_EL2".  With lots of enumeration
    5369         * on a per-field basis.  In current QEMU, this condition
    5370         * is arm_is_secure_below_el3().
   5371         *
   5372         * Since the v8.4 language applies to the entire register, and
   5373         * appears to be backward compatible, use that.
   5374         */
   5375        return 0;
   5376    }
   5377
   5378    /*
   5379     * For a cpu that supports both aarch64 and aarch32, we can set bits
   5380     * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
   5381     * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
   5382     */
   5383    if (!arm_el_is_aa64(env, 2)) {
   5384        uint64_t aa32_valid;
   5385
   5386        /*
   5387         * These bits are up-to-date as of ARMv8.6.
   5388         * For HCR, it's easiest to list just the 2 bits that are invalid.
   5389         * For HCR2, list those that are valid.
   5390         */
   5391        aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
   5392        aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
   5393                       HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
   5394        ret &= aa32_valid;
   5395    }
   5396
   5397    if (ret & HCR_TGE) {
   5398        /* These bits are up-to-date as of ARMv8.6.  */
   5399        if (ret & HCR_E2H) {
   5400            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
   5401                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
   5402                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
   5403                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
   5404                     HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
   5405                     HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
   5406        } else {
   5407            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
   5408        }
   5409        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
   5410                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
   5411                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
   5412                 HCR_TLOR);
   5413    }
   5414
   5415    return ret;
   5416}
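
/*
 * Editorial note -- illustrative sketch, not from the original file,
 * assuming EL2 is enabled and executing AArch64: with HCR_EL2.TGE set
 * and E2H clear, the physical interrupt routing bits read back as 1 no
 * matter what was written:
 *
 *     env->cp15.hcr_el2 = HCR_TGE;           // FMO/IMO/AMO written as 0
 *     uint64_t eff = arm_hcr_el2_eff(env);
 *     assert(eff & (HCR_FMO | HCR_IMO | HCR_AMO));   // forced to 1
 *     assert(!(eff & HCR_TSC));   // many trap bits are cleared under TGE
 */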
   5417
   5418static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
   5419                           uint64_t value)
   5420{
   5421    /*
   5422     * For A-profile AArch32 EL3, if NSACR.CP10
   5423     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
   5424     */
   5425    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
   5426        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
   5427        value &= ~(0x3 << 10);
   5428        value |= env->cp15.cptr_el[2] & (0x3 << 10);
   5429    }
   5430    env->cp15.cptr_el[2] = value;
   5431}
   5432
   5433static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
   5434{
   5435    /*
   5436     * For A-profile AArch32 EL3, if NSACR.CP10
   5437     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
   5438     */
   5439    uint64_t value = env->cp15.cptr_el[2];
   5440
   5441    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
   5442        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
   5443        value |= 0x3 << 10;
   5444    }
   5445    return value;
   5446}
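
/*
 * Editorial note -- illustrative sketch, not from the original file.
 * Assuming EL3 exists and is AArch32, the current state is non-secure,
 * and Secure EL3 software has cleared NSACR.CP10, the interlock above
 * makes HCPTR.{TCP11,TCP10} behave as RAO/WI from the NS side:
 *
 *     env->cp15.nsacr &= ~(1 << 10);         // Secure side denies CP10
 *     cptr_el2_write(env, ri, 0);            // NS hypervisor writes 0
 *     assert(cptr_el2_read(env, ri) & (0x3 << 10));   // still reads 1
 */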
   5447
   5448static const ARMCPRegInfo el2_cp_reginfo[] = {
   5449    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
   5450      .type = ARM_CP_IO,
   5451      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
   5452      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
   5453      .writefn = hcr_write },
   5454    { .name = "HCR", .state = ARM_CP_STATE_AA32,
   5455      .type = ARM_CP_ALIAS | ARM_CP_IO,
   5456      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
   5457      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
   5458      .writefn = hcr_writelow },
   5459    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
   5460      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
   5461      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5462    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
   5463      .type = ARM_CP_ALIAS,
   5464      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
   5465      .access = PL2_RW,
   5466      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
   5467    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
   5468      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
   5469      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
   5470    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
   5471      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
   5472      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
   5473    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
   5474      .type = ARM_CP_ALIAS,
   5475      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
   5476      .access = PL2_RW,
   5477      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
   5478    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
   5479      .type = ARM_CP_ALIAS,
   5480      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
   5481      .access = PL2_RW,
   5482      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
   5483    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
   5484      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
   5485      .access = PL2_RW, .writefn = vbar_write,
   5486      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
   5487      .resetvalue = 0 },
   5488    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
   5489      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
   5490      .access = PL3_RW, .type = ARM_CP_ALIAS,
   5491      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
   5492    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
   5493      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
   5494      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
   5495      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
   5496      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
   5497    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
   5498      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
   5499      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
   5500      .resetvalue = 0 },
   5501    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
   5502      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
   5503      .access = PL2_RW, .type = ARM_CP_ALIAS,
   5504      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
   5505    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
   5506      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
   5507      .access = PL2_RW, .type = ARM_CP_CONST,
   5508      .resetvalue = 0 },
   5509    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
   5510    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
   5511      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
   5512      .access = PL2_RW, .type = ARM_CP_CONST,
   5513      .resetvalue = 0 },
   5514    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
   5515      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
   5516      .access = PL2_RW, .type = ARM_CP_CONST,
   5517      .resetvalue = 0 },
   5518    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
   5519      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
   5520      .access = PL2_RW, .type = ARM_CP_CONST,
   5521      .resetvalue = 0 },
   5522    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
   5523      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
   5524      .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
   5525      /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
   5526      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
   5527    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
   5528      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
   5529      .type = ARM_CP_ALIAS,
   5530      .access = PL2_RW, .accessfn = access_el3_aa32ns,
   5531      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
   5532    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
   5533      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
   5534      .access = PL2_RW,
   5535      /* no .writefn needed as this can't cause an ASID change;
   5536       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
   5537       */
   5538      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
   5539    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
   5540      .cp = 15, .opc1 = 6, .crm = 2,
   5541      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
   5542      .access = PL2_RW, .accessfn = access_el3_aa32ns,
   5543      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
   5544      .writefn = vttbr_write },
   5545    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
   5546      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
   5547      .access = PL2_RW, .writefn = vttbr_write,
   5548      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
   5549    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
   5550      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
   5551      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
   5552      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
   5553    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
   5554      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
   5555      .access = PL2_RW, .resetvalue = 0,
   5556      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
   5557    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
   5558      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
   5559      .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
   5560      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
   5561    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
   5562      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
   5563      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
   5564    { .name = "TLBIALLNSNH",
   5565      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
   5566      .type = ARM_CP_NO_RAW, .access = PL2_W,
   5567      .writefn = tlbiall_nsnh_write },
   5568    { .name = "TLBIALLNSNHIS",
   5569      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
   5570      .type = ARM_CP_NO_RAW, .access = PL2_W,
   5571      .writefn = tlbiall_nsnh_is_write },
   5572    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
   5573      .type = ARM_CP_NO_RAW, .access = PL2_W,
   5574      .writefn = tlbiall_hyp_write },
   5575    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
   5576      .type = ARM_CP_NO_RAW, .access = PL2_W,
   5577      .writefn = tlbiall_hyp_is_write },
   5578    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
   5579      .type = ARM_CP_NO_RAW, .access = PL2_W,
   5580      .writefn = tlbimva_hyp_write },
   5581    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
   5582      .type = ARM_CP_NO_RAW, .access = PL2_W,
   5583      .writefn = tlbimva_hyp_is_write },
   5584    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
   5585      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
   5586      .type = ARM_CP_NO_RAW, .access = PL2_W,
   5587      .writefn = tlbi_aa64_alle2_write },
   5588    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
   5589      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
   5590      .type = ARM_CP_NO_RAW, .access = PL2_W,
   5591      .writefn = tlbi_aa64_vae2_write },
   5592    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
   5593      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
   5594      .access = PL2_W, .type = ARM_CP_NO_RAW,
   5595      .writefn = tlbi_aa64_vae2_write },
   5596    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
   5597      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
   5598      .access = PL2_W, .type = ARM_CP_NO_RAW,
   5599      .writefn = tlbi_aa64_alle2is_write },
   5600    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
   5601      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
   5602      .type = ARM_CP_NO_RAW, .access = PL2_W,
   5603      .writefn = tlbi_aa64_vae2is_write },
   5604    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
   5605      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
   5606      .access = PL2_W, .type = ARM_CP_NO_RAW,
   5607      .writefn = tlbi_aa64_vae2is_write },
   5608#ifndef CONFIG_USER_ONLY
   5609    /* Unlike the other EL2-related AT operations, these must
   5610     * UNDEF from EL3 if EL2 is not implemented, which is why we
   5611     * define them here rather than with the rest of the AT ops.
   5612     */
   5613    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
   5614      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
   5615      .access = PL2_W, .accessfn = at_s1e2_access,
   5616      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
   5617    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
   5618      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
   5619      .access = PL2_W, .accessfn = at_s1e2_access,
   5620      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
   5621    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
   5622     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
   5623     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
   5624     * to behave as if SCR.NS was 1.
   5625     */
   5626    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
   5627      .access = PL2_W,
   5628      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
   5629    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
   5630      .access = PL2_W,
   5631      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
   5632    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
   5633      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
    5634      /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
   5635       * reset values as IMPDEF. We choose to reset to 3 to comply with
   5636       * both ARMv7 and ARMv8.
   5637       */
   5638      .access = PL2_RW, .resetvalue = 3,
   5639      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
   5640    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
   5641      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
   5642      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
   5643      .writefn = gt_cntvoff_write,
   5644      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
   5645    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
   5646      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
   5647      .writefn = gt_cntvoff_write,
   5648      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
   5649    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
   5650      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
   5651      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
   5652      .type = ARM_CP_IO, .access = PL2_RW,
   5653      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
   5654    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
   5655      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
   5656      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
   5657      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
   5658    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
   5659      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
   5660      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
   5661      .resetfn = gt_hyp_timer_reset,
   5662      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
   5663    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
   5664      .type = ARM_CP_IO,
   5665      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
   5666      .access = PL2_RW,
   5667      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
   5668      .resetvalue = 0,
   5669      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
   5670#endif
   5671    /* The only field of MDCR_EL2 that has a defined architectural reset value
    5672     * is MDCR_EL2.HPMN, which should reset to the value of PMCR_EL0.N.
   5673     */
   5674    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
   5675      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
   5676      .access = PL2_RW, .resetvalue = PMCR_NUM_COUNTERS,
   5677      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
   5678    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
   5679      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
   5680      .access = PL2_RW, .accessfn = access_el3_aa32ns,
   5681      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
   5682    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
   5683      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
   5684      .access = PL2_RW,
   5685      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
   5686    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
   5687      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
   5688      .access = PL2_RW,
   5689      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
   5690    REGINFO_SENTINEL
   5691};
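
/*
 * Editorial note: each entry above keys the register by its encoding.
 * For example the "HCR_EL2" entry (opc0=3, opc1=4, crn=1, crm=1,
 * opc2=0) corresponds to the AArch64 system register S3_4_C1_C1_0:
 *
 *     msr hcr_el2, x0      // dispatched through .writefn = hcr_write
 *     mrs x0, hcr_el2      // no .readfn, so served from .fieldoffset
 */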
   5692
   5693static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
   5694    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
   5695      .type = ARM_CP_ALIAS | ARM_CP_IO,
   5696      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
   5697      .access = PL2_RW,
   5698      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
   5699      .writefn = hcr_writehigh },
   5700    REGINFO_SENTINEL
   5701};
   5702
   5703static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
   5704                                  bool isread)
   5705{
   5706    if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
   5707        return CP_ACCESS_OK;
   5708    }
   5709    return CP_ACCESS_TRAP_UNCATEGORIZED;
   5710}
   5711
   5712static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
   5713    { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
   5714      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
   5715      .access = PL2_RW, .accessfn = sel2_access,
   5716      .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
   5717    { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
   5718      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
   5719      .access = PL2_RW, .accessfn = sel2_access,
   5720      .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
   5721    REGINFO_SENTINEL
   5722};
   5723
   5724static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
   5725                                   bool isread)
   5726{
   5727    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
   5728     * At Secure EL1 it traps to EL3 or EL2.
   5729     */
   5730    if (arm_current_el(env) == 3) {
   5731        return CP_ACCESS_OK;
   5732    }
   5733    if (arm_is_secure_below_el3(env)) {
   5734        if (env->cp15.scr_el3 & SCR_EEL2) {
   5735            return CP_ACCESS_TRAP_EL2;
   5736        }
   5737        return CP_ACCESS_TRAP_EL3;
   5738    }
   5739    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
   5740    if (isread) {
   5741        return CP_ACCESS_OK;
   5742    }
   5743    return CP_ACCESS_TRAP_UNCATEGORIZED;
   5744}
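
/*
 * Editorial summary of the access decision above:
 *
 *     EL3                          -> CP_ACCESS_OK (read/write)
 *     Secure EL1, SCR_EL3.EEL2 set -> trap to EL2
 *     Secure EL1 otherwise         -> trap to EL3
 *     NS EL1/EL2, read             -> CP_ACCESS_OK (read-only)
 *     NS EL1/EL2, write            -> UNDEF (uncategorized trap)
 */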
   5745
   5746static const ARMCPRegInfo el3_cp_reginfo[] = {
   5747    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
   5748      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
   5749      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
   5750      .resetfn = scr_reset, .writefn = scr_write },
   5751    { .name = "SCR",  .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
   5752      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
   5753      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
   5754      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
   5755      .writefn = scr_write },
   5756    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
   5757      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
   5758      .access = PL3_RW, .resetvalue = 0,
   5759      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
   5760    { .name = "SDER",
   5761      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
   5762      .access = PL3_RW, .resetvalue = 0,
   5763      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
   5764    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
   5765      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
   5766      .writefn = vbar_write, .resetvalue = 0,
   5767      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
   5768    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
   5769      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
   5770      .access = PL3_RW, .resetvalue = 0,
   5771      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
   5772    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
   5773      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
   5774      .access = PL3_RW,
   5775      /* no .writefn needed as this can't cause an ASID change;
   5776       * we must provide a .raw_writefn and .resetfn because we handle
   5777       * reset and migration for the AArch32 TTBCR(S), which might be
   5778       * using mask and base_mask.
   5779       */
   5780      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
   5781      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
   5782    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
   5783      .type = ARM_CP_ALIAS,
   5784      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
   5785      .access = PL3_RW,
   5786      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
   5787    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
   5788      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
   5789      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
   5790    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
   5791      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
   5792      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
   5793    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
   5794      .type = ARM_CP_ALIAS,
   5795      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
   5796      .access = PL3_RW,
   5797      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
   5798    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
   5799      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
   5800      .access = PL3_RW, .writefn = vbar_write,
   5801      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
   5802      .resetvalue = 0 },
   5803    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
   5804      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
   5805      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
   5806      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
   5807    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
   5808      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
   5809      .access = PL3_RW, .resetvalue = 0,
   5810      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
   5811    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
   5812      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
   5813      .access = PL3_RW, .type = ARM_CP_CONST,
   5814      .resetvalue = 0 },
   5815    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
   5816      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
   5817      .access = PL3_RW, .type = ARM_CP_CONST,
   5818      .resetvalue = 0 },
   5819    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
   5820      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
   5821      .access = PL3_RW, .type = ARM_CP_CONST,
   5822      .resetvalue = 0 },
   5823    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
   5824      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
   5825      .access = PL3_W, .type = ARM_CP_NO_RAW,
   5826      .writefn = tlbi_aa64_alle3is_write },
   5827    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
   5828      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
   5829      .access = PL3_W, .type = ARM_CP_NO_RAW,
   5830      .writefn = tlbi_aa64_vae3is_write },
   5831    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
   5832      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
   5833      .access = PL3_W, .type = ARM_CP_NO_RAW,
   5834      .writefn = tlbi_aa64_vae3is_write },
   5835    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
   5836      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
   5837      .access = PL3_W, .type = ARM_CP_NO_RAW,
   5838      .writefn = tlbi_aa64_alle3_write },
   5839    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
   5840      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
   5841      .access = PL3_W, .type = ARM_CP_NO_RAW,
   5842      .writefn = tlbi_aa64_vae3_write },
   5843    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
   5844      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
   5845      .access = PL3_W, .type = ARM_CP_NO_RAW,
   5846      .writefn = tlbi_aa64_vae3_write },
   5847    REGINFO_SENTINEL
   5848};
   5849
   5850#ifndef CONFIG_USER_ONLY
   5851/* Test if system register redirection is to occur in the current state.  */
   5852static bool redirect_for_e2h(CPUARMState *env)
   5853{
   5854    return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
   5855}
   5856
   5857static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
   5858{
   5859    CPReadFn *readfn;
   5860
   5861    if (redirect_for_e2h(env)) {
   5862        /* Switch to the saved EL2 version of the register.  */
   5863        ri = ri->opaque;
   5864        readfn = ri->readfn;
   5865    } else {
   5866        readfn = ri->orig_readfn;
   5867    }
   5868    if (readfn == NULL) {
   5869        readfn = raw_read;
   5870    }
   5871    return readfn(env, ri);
   5872}
   5873
   5874static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
   5875                          uint64_t value)
   5876{
   5877    CPWriteFn *writefn;
   5878
   5879    if (redirect_for_e2h(env)) {
   5880        /* Switch to the saved EL2 version of the register.  */
   5881        ri = ri->opaque;
   5882        writefn = ri->writefn;
   5883    } else {
   5884        writefn = ri->orig_writefn;
   5885    }
   5886    if (writefn == NULL) {
   5887        writefn = raw_write;
   5888    }
   5889    writefn(env, ri, value);
   5890}
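
/*
 * Editorial note -- illustrative sketch, not from the original file:
 * with FEAT_VHE and HCR_EL2.E2H set, an EL2 access such as
 *
 *     mrs x0, sctlr_el1
 *
 * lands on the EL1 "SCTLR" reginfo, whose readfn has been replaced by
 * el2_e2h_read(). redirect_for_e2h() is then true, so ri->opaque (the
 * SCTLR_EL2 reginfo linked up by define_arm_vh_e2h_redirects_aliases()
 * below) is consulted instead and the EL2 copy of the register is read.
 */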
   5891
   5892static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
   5893{
   5894    struct E2HAlias {
   5895        uint32_t src_key, dst_key, new_key;
   5896        const char *src_name, *dst_name, *new_name;
   5897        bool (*feature)(const ARMISARegisters *id);
   5898    };
   5899
   5900#define K(op0, op1, crn, crm, op2) \
   5901    ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
   5902
   5903    static const struct E2HAlias aliases[] = {
   5904        { K(3, 0,  1, 0, 0), K(3, 4,  1, 0, 0), K(3, 5, 1, 0, 0),
   5905          "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
   5906        { K(3, 0,  1, 0, 2), K(3, 4,  1, 1, 2), K(3, 5, 1, 0, 2),
   5907          "CPACR", "CPTR_EL2", "CPACR_EL12" },
   5908        { K(3, 0,  2, 0, 0), K(3, 4,  2, 0, 0), K(3, 5, 2, 0, 0),
   5909          "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
   5910        { K(3, 0,  2, 0, 1), K(3, 4,  2, 0, 1), K(3, 5, 2, 0, 1),
   5911          "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
   5912        { K(3, 0,  2, 0, 2), K(3, 4,  2, 0, 2), K(3, 5, 2, 0, 2),
   5913          "TCR_EL1", "TCR_EL2", "TCR_EL12" },
   5914        { K(3, 0,  4, 0, 0), K(3, 4,  4, 0, 0), K(3, 5, 4, 0, 0),
   5915          "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
   5916        { K(3, 0,  4, 0, 1), K(3, 4,  4, 0, 1), K(3, 5, 4, 0, 1),
   5917          "ELR_EL1", "ELR_EL2", "ELR_EL12" },
   5918        { K(3, 0,  5, 1, 0), K(3, 4,  5, 1, 0), K(3, 5, 5, 1, 0),
   5919          "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
   5920        { K(3, 0,  5, 1, 1), K(3, 4,  5, 1, 1), K(3, 5, 5, 1, 1),
   5921          "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
   5922        { K(3, 0,  5, 2, 0), K(3, 4,  5, 2, 0), K(3, 5, 5, 2, 0),
   5923          "ESR_EL1", "ESR_EL2", "ESR_EL12" },
   5924        { K(3, 0,  6, 0, 0), K(3, 4,  6, 0, 0), K(3, 5, 6, 0, 0),
   5925          "FAR_EL1", "FAR_EL2", "FAR_EL12" },
   5926        { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
   5927          "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
   5928        { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
   5929          "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
   5930        { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
   5931          "VBAR", "VBAR_EL2", "VBAR_EL12" },
   5932        { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
   5933          "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
   5934        { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
   5935          "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
   5936
   5937        /*
   5938         * Note that redirection of ZCR is mentioned in the description
   5939         * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
   5940         * not in the summary table.
   5941         */
   5942        { K(3, 0,  1, 2, 0), K(3, 4,  1, 2, 0), K(3, 5, 1, 2, 0),
   5943          "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
   5944
   5945        { K(3, 0,  5, 6, 0), K(3, 4,  5, 6, 0), K(3, 5, 5, 6, 0),
   5946          "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
   5947
   5948        /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
   5949        /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
   5950    };
   5951#undef K
   5952
   5953    size_t i;
   5954
   5955    for (i = 0; i < ARRAY_SIZE(aliases); i++) {
   5956        const struct E2HAlias *a = &aliases[i];
   5957        ARMCPRegInfo *src_reg, *dst_reg;
   5958
   5959        if (a->feature && !a->feature(&cpu->isar)) {
   5960            continue;
   5961        }
   5962
   5963        src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key);
   5964        dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key);
   5965        g_assert(src_reg != NULL);
   5966        g_assert(dst_reg != NULL);
   5967
   5968        /* Cross-compare names to detect typos in the keys.  */
   5969        g_assert(strcmp(src_reg->name, a->src_name) == 0);
   5970        g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
   5971
   5972        /* None of the core system registers use opaque; we will.  */
   5973        g_assert(src_reg->opaque == NULL);
   5974
   5975        /* Create alias before redirection so we dup the right data. */
   5976        if (a->new_key) {
   5977            ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
   5978            uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t));
   5979            bool ok;
   5980
   5981            new_reg->name = a->new_name;
   5982            new_reg->type |= ARM_CP_ALIAS;
   5983            /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place.  */
   5984            new_reg->access &= PL2_RW | PL3_RW;
   5985
   5986            ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg);
   5987            g_assert(ok);
   5988        }
   5989
   5990        src_reg->opaque = dst_reg;
   5991        src_reg->orig_readfn = src_reg->readfn ?: raw_read;
   5992        src_reg->orig_writefn = src_reg->writefn ?: raw_write;
   5993        if (!src_reg->raw_readfn) {
   5994            src_reg->raw_readfn = raw_read;
   5995        }
   5996        if (!src_reg->raw_writefn) {
   5997            src_reg->raw_writefn = raw_write;
   5998        }
   5999        src_reg->readfn = el2_e2h_read;
   6000        src_reg->writefn = el2_e2h_write;
   6001    }
   6002}
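
/*
 * Editorial example: for the SCTLR row above, this loop creates a third
 * key, K(3, 5, 1, 0, 0) == "SCTLR_EL12". The new reginfo is duplicated
 * from the EL1 entry *before* the readfn/writefn redirection, so at EL2
 * with E2H set
 *
 *     mrs x0, sctlr_el12
 *
 * still reaches the real EL1 register state, while plain sctlr_el1
 * accesses are redirected to SCTLR_EL2 as described above.
 */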
   6003#endif
   6004
   6005static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
   6006                                     bool isread)
   6007{
   6008    int cur_el = arm_current_el(env);
   6009
   6010    if (cur_el < 2) {
   6011        uint64_t hcr = arm_hcr_el2_eff(env);
   6012
   6013        if (cur_el == 0) {
   6014            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
   6015                if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
   6016                    return CP_ACCESS_TRAP_EL2;
   6017                }
   6018            } else {
   6019                if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
   6020                    return CP_ACCESS_TRAP;
   6021                }
   6022                if (hcr & HCR_TID2) {
   6023                    return CP_ACCESS_TRAP_EL2;
   6024                }
   6025            }
   6026        } else if (hcr & HCR_TID2) {
   6027            return CP_ACCESS_TRAP_EL2;
   6028        }
   6029    }
   6030
   6035    return CP_ACCESS_OK;
   6036}
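
/*
 * Editorial summary -- how an EL0 "mrs x0, ctr_el0" is routed by the
 * checks above:
 *
 *     HCR_EL2.{E2H,TGE} == {1,1} : SCTLR_EL2.UCT clear -> trap to EL2
 *     otherwise                  : SCTLR_EL1.UCT clear -> trap to EL1,
 *                                  else HCR_EL2.TID2   -> trap to EL2
 *
 * EL1 accesses are only subject to the HCR_EL2.TID2 trap.
 */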
   6037
   6038static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
   6039                        uint64_t value)
   6040{
   6041    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
   6042     * read via a bit in OSLSR_EL1.
   6043     */
   6044    int oslock;
   6045
   6046    if (ri->state == ARM_CP_STATE_AA32) {
   6047        oslock = (value == 0xC5ACCE55);
   6048    } else {
   6049        oslock = value & 1;
   6050    }
   6051
   6052    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
   6053}
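
/*
 * Editorial note -- illustrative sketch with hypothetical reginfo
 * handles ri_aa32/ri_aa64 (not from the original file). The AArch32
 * OSLAR consumes the debug lock key, while AArch64 only consumes bit 0:
 *
 *     oslar_write(env, ri_aa32, 0xC5ACCE55);  // AArch32: lock taken
 *     oslar_write(env, ri_aa64, 1);           // AArch64: lock taken
 *     oslar_write(env, ri_aa64, 0);           // lock released
 *     // OSLSR_EL1 bit 1 (OSLK) reflects the lock state in each case
 */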
   6054
   6055static const ARMCPRegInfo debug_cp_reginfo[] = {
   6056    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
   6057     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
   6058     * unlike DBGDRAR it is never accessible from EL0.
   6059     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
   6060     * accessor.
   6061     */
   6062    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
   6063      .access = PL0_R, .accessfn = access_tdra,
   6064      .type = ARM_CP_CONST, .resetvalue = 0 },
   6065    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
   6066      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
   6067      .access = PL1_R, .accessfn = access_tdra,
   6068      .type = ARM_CP_CONST, .resetvalue = 0 },
   6069    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
   6070      .access = PL0_R, .accessfn = access_tdra,
   6071      .type = ARM_CP_CONST, .resetvalue = 0 },
   6072    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
   6073    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
   6074      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
   6075      .access = PL1_RW, .accessfn = access_tda,
   6076      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
   6077      .resetvalue = 0 },
   6078    /*
   6079     * MDCCSR_EL0[30:29] map to EDSCR[30:29].  Simply RAZ as the external
   6080     * Debug Communication Channel is not implemented.
   6081     */
   6082    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64,
   6083      .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0,
   6084      .access = PL0_R, .accessfn = access_tda,
   6085      .type = ARM_CP_CONST, .resetvalue = 0 },
   6086    /*
   6087     * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2].  Map all bits as
   6088     * it is unlikely a guest will care.
   6089     * We don't implement the configurable EL0 access.
   6090     */
   6091    { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32,
   6092      .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
   6093      .type = ARM_CP_ALIAS,
   6094      .access = PL1_R, .accessfn = access_tda,
   6095      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
   6096    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
   6097      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
   6098      .access = PL1_W, .type = ARM_CP_NO_RAW,
   6099      .accessfn = access_tdosa,
   6100      .writefn = oslar_write },
   6101    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
   6102      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
   6103      .access = PL1_R, .resetvalue = 10,
   6104      .accessfn = access_tdosa,
   6105      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
   6106    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
   6107    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
   6108      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
   6109      .access = PL1_RW, .accessfn = access_tdosa,
   6110      .type = ARM_CP_NOP },
   6111    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
   6112     * implement vector catch debug events yet.
   6113     */
   6114    { .name = "DBGVCR",
   6115      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
   6116      .access = PL1_RW, .accessfn = access_tda,
   6117      .type = ARM_CP_NOP },
   6118    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
   6119     * to save and restore a 32-bit guest's DBGVCR)
   6120     */
   6121    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
   6122      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
   6123      .access = PL2_RW, .accessfn = access_tda,
   6124      .type = ARM_CP_NOP },
   6125    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
   6126     * Channel but Linux may try to access this register. The 32-bit
   6127     * alias is DBGDCCINT.
   6128     */
   6129    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
   6130      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
   6131      .access = PL1_RW, .accessfn = access_tda,
   6132      .type = ARM_CP_NOP },
   6133    REGINFO_SENTINEL
   6134};
   6135
   6136static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
   6137    /* 64 bit access versions of the (dummy) debug registers */
   6138    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
   6139      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
   6140    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
   6141      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
   6142    REGINFO_SENTINEL
   6143};
   6144
   6145/* Return the exception level to which exceptions should be taken
   6146 * via SVEAccessTrap.  If an exception should be routed through
   6147 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
   6148 * take care of raising that exception.
   6149 * C.f. the ARM pseudocode function CheckSVEEnabled.
   6150 */
   6151int sve_exception_el(CPUARMState *env, int el)
   6152{
   6153#ifndef CONFIG_USER_ONLY
   6154    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
   6155
   6156    if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
   6157        bool disabled = false;
   6158
   6159        /* The CPACR.ZEN controls traps to EL1:
   6160         * 0, 2 : trap EL0 and EL1 accesses
   6161         * 1    : trap only EL0 accesses
   6162         * 3    : trap no accesses
   6163         */
   6164        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
   6165            disabled = true;
   6166        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
   6167            disabled = el == 0;
   6168        }
   6169        if (disabled) {
   6170            /* route_to_el2 */
   6171            return hcr_el2 & HCR_TGE ? 2 : 1;
   6172        }
   6173
   6174        /* Check CPACR.FPEN.  */
   6175        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
   6176            disabled = true;
   6177        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
   6178            disabled = el == 0;
   6179        }
   6180        if (disabled) {
   6181            return 0;
   6182        }
   6183    }
   6184
   6185    /* CPTR_EL2.  Since TZ and TFP are positive,
   6186     * they will be zero when EL2 is not present.
   6187     */
   6188    if (el <= 2 && arm_is_el2_enabled(env)) {
   6189        if (env->cp15.cptr_el[2] & CPTR_TZ) {
   6190            return 2;
   6191        }
   6192        if (env->cp15.cptr_el[2] & CPTR_TFP) {
   6193            return 0;
   6194        }
   6195    }
   6196
   6197    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
   6198    if (arm_feature(env, ARM_FEATURE_EL3)
   6199        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
   6200        return 3;
   6201    }
   6202#endif
   6203    return 0;
   6204}
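
/*
 * Editorial note -- worked example for the CPACR_EL1.ZEN decode above
 * (bits [17:16], tested one bit at a time). For an EL0 access with
 * ZEN == 0b01:
 *
 *     extract32(env->cp15.cpacr_el1, 16, 1) == 1   // not "trap all"
 *     extract32(env->cp15.cpacr_el1, 17, 1) == 0   // EL0 still trapped
 *     // -> disabled, routed to EL1, or to EL2 when HCR_EL2.TGE is set
 */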
   6205
   6206uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
   6207{
   6208    uint32_t end_len;
   6209
   6210    start_len = MIN(start_len, ARM_MAX_VQ - 1);
   6211    end_len = start_len;
   6212
   6213    if (!test_bit(start_len, cpu->sve_vq_map)) {
   6214        end_len = find_last_bit(cpu->sve_vq_map, start_len);
   6215        assert(end_len < start_len);
   6216    }
   6217    return end_len;
   6218}
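
/*
 * Editorial note -- worked example, assuming a hypothetical cpu whose
 * sve_vq_map has only bits 0, 1 and 3 set (VQ 1, 2 and 4 supported).
 * A request of start_len == 2 (VQ 3) is not in the map, so
 * find_last_bit() scans bits [0, 2) and settles on the next smaller
 * supported length:
 *
 *     aarch64_sve_zcr_get_valid_len(cpu, 2) == 1   // falls back to VQ 2
 */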
   6219
   6220/*
   6221 * Given that SVE is enabled, return the vector length for EL.
   6222 */
   6223uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
   6224{
   6225    ARMCPU *cpu = env_archcpu(env);
   6226    uint32_t zcr_len = cpu->sve_max_vq - 1;
   6227
   6228    if (el <= 1) {
   6229        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
   6230    }
   6231    if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
   6232        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
   6233    }
   6234    if (arm_feature(env, ARM_FEATURE_EL3)) {
   6235        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
   6236    }
   6237
   6238    return aarch64_sve_zcr_get_valid_len(cpu, zcr_len);
   6239}
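
/*
 * Editorial note -- worked example, assuming EL2 is present and every
 * VQ up to sve_max_vq == 16 is in sve_vq_map. With ZCR_EL1.LEN == 7 and
 * ZCR_EL2.LEN == 3, an EL1 access computes
 *
 *     MIN(16 - 1, 7, 3) == 3    // -> VQ 4, a 512-bit vector length
 *
 * i.e. the effective length is the minimum over all controlling ELs.
 */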
   6240
   6241static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   6242                      uint64_t value)
   6243{
   6244    int cur_el = arm_current_el(env);
   6245    int old_len = sve_zcr_len_for_el(env, cur_el);
   6246    int new_len;
   6247
   6248    /* Bits other than [3:0] are RAZ/WI.  */
   6249    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
   6250    raw_write(env, ri, value & 0xf);
   6251
   6252    /*
   6253     * Because we arrived here, we know both FP and SVE are enabled;
   6254     * otherwise we would have trapped access to the ZCR_ELn register.
   6255     */
   6256    new_len = sve_zcr_len_for_el(env, cur_el);
   6257    if (new_len < old_len) {
   6258        aarch64_sve_narrow_vq(env, new_len + 1);
   6259    }
   6260}
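
/*
 * Editorial note: only shrinking needs work here. If a write lowers the
 * effective length, aarch64_sve_narrow_vq() zeroes the now-inaccessible
 * tails of the Z/P registers; growing needs no data movement, because
 * QEMU's chosen behaviour is that the newly visible bits read as zero.
 */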
   6261
   6262static const ARMCPRegInfo zcr_el1_reginfo = {
   6263    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
   6264    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
   6265    .access = PL1_RW, .type = ARM_CP_SVE,
   6266    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
   6267    .writefn = zcr_write, .raw_writefn = raw_write
   6268};
   6269
   6270static const ARMCPRegInfo zcr_el2_reginfo = {
   6271    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
   6272    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
   6273    .access = PL2_RW, .type = ARM_CP_SVE,
   6274    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
   6275    .writefn = zcr_write, .raw_writefn = raw_write
   6276};
   6277
   6278static const ARMCPRegInfo zcr_no_el2_reginfo = {
   6279    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
   6280    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
   6281    .access = PL2_RW, .type = ARM_CP_SVE,
   6282    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
   6283};
   6284
   6285static const ARMCPRegInfo zcr_el3_reginfo = {
   6286    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
   6287    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
   6288    .access = PL3_RW, .type = ARM_CP_SVE,
   6289    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
   6290    .writefn = zcr_write, .raw_writefn = raw_write
   6291};
   6292
   6293void hw_watchpoint_update(ARMCPU *cpu, int n)
   6294{
   6295    CPUARMState *env = &cpu->env;
   6296    vaddr len = 0;
   6297    vaddr wvr = env->cp15.dbgwvr[n];
   6298    uint64_t wcr = env->cp15.dbgwcr[n];
   6299    int mask;
   6300    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
   6301
   6302    if (env->cpu_watchpoint[n]) {
   6303        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
   6304        env->cpu_watchpoint[n] = NULL;
   6305    }
   6306
   6307    if (!extract64(wcr, 0, 1)) {
   6308        /* E bit clear : watchpoint disabled */
   6309        return;
   6310    }
   6311
   6312    switch (extract64(wcr, 3, 2)) {
   6313    case 0:
   6314        /* LSC 00 is reserved and must behave as if the wp is disabled */
   6315        return;
   6316    case 1:
   6317        flags |= BP_MEM_READ;
   6318        break;
   6319    case 2:
   6320        flags |= BP_MEM_WRITE;
   6321        break;
   6322    case 3:
   6323        flags |= BP_MEM_ACCESS;
   6324        break;
   6325    }
   6326
   6327    /* Attempts to use both MASK and BAS fields simultaneously are
   6328     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
   6329     * thus generating a watchpoint for every byte in the masked region.
   6330     */
   6331    mask = extract64(wcr, 24, 4);
   6332    if (mask == 1 || mask == 2) {
   6333        /* Reserved values of MASK; we must act as if the mask value was
   6334         * some non-reserved value, or as if the watchpoint were disabled.
   6335         * We choose the latter.
   6336         */
   6337        return;
   6338    } else if (mask) {
   6339        /* Watchpoint covers an aligned area up to 2GB in size */
   6340        len = 1ULL << mask;
   6341        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
   6342         * whether the watchpoint fires when the unmasked bits match; we opt
   6343         * to generate the exceptions.
   6344         */
   6345        wvr &= ~(len - 1);
   6346    } else {
   6347        /* Watchpoint covers bytes defined by the byte address select bits */
   6348        int bas = extract64(wcr, 5, 8);
   6349        int basstart;
   6350
   6351        if (extract64(wvr, 2, 1)) {
    6352            /* Deprecated case of an address aligned to 4 but not 8 bytes.
    6353             * BAS[7:4] are ignored, and BAS[3:0] define which bytes to watch.
   6354             */
   6355            bas &= 0xf;
   6356        }
   6357
   6358        if (bas == 0) {
   6359            /* This must act as if the watchpoint is disabled */
   6360            return;
   6361        }
   6362
   6363        /* The BAS bits are supposed to be programmed to indicate a contiguous
   6364         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
   6365         * we fire for each byte in the word/doubleword addressed by the WVR.
   6366         * We choose to ignore any non-zero bits after the first range of 1s.
   6367         */
   6368        basstart = ctz32(bas);
   6369        len = cto32(bas >> basstart);
   6370        wvr += basstart;
   6371    }
   6372
   6373    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
   6374                          &env->cpu_watchpoint[n]);
   6375}
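
/*
 * Editorial note -- worked example for the BAS decode above. For a
 * doubleword-aligned WVR with BAS == 0b00111100:
 *
 *     basstart = ctz32(0x3c);          // == 2, first watched byte
 *     len      = cto32(0x3c >> 2);     // == 4 contiguous bytes
 *     // the watchpoint covers [wvr + 2, wvr + 6)
 */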
   6376
   6377void hw_watchpoint_update_all(ARMCPU *cpu)
   6378{
   6379    int i;
   6380    CPUARMState *env = &cpu->env;
   6381
   6382    /* Completely clear out existing QEMU watchpoints and our array, to
   6383     * avoid possible stale entries following migration load.
   6384     */
   6385    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
   6386    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
   6387
   6388    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
   6389        hw_watchpoint_update(cpu, i);
   6390    }
   6391}
   6392
   6393static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   6394                         uint64_t value)
   6395{
   6396    ARMCPU *cpu = env_archcpu(env);
   6397    int i = ri->crm;
   6398
   6399    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
   6400     * register reads and behaves as if values written are sign extended.
   6401     * Bits [1:0] are RES0.
   6402     */
   6403    value = sextract64(value, 0, 49) & ~3ULL;
   6404
   6405    raw_write(env, ri, value);
   6406    hw_watchpoint_update(cpu, i);
   6407}
   6408
   6409static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   6410                         uint64_t value)
   6411{
   6412    ARMCPU *cpu = env_archcpu(env);
   6413    int i = ri->crm;
   6414
   6415    raw_write(env, ri, value);
   6416    hw_watchpoint_update(cpu, i);
   6417}
   6418
   6419void hw_breakpoint_update(ARMCPU *cpu, int n)
   6420{
   6421    CPUARMState *env = &cpu->env;
   6422    uint64_t bvr = env->cp15.dbgbvr[n];
   6423    uint64_t bcr = env->cp15.dbgbcr[n];
   6424    vaddr addr;
   6425    int bt;
   6426    int flags = BP_CPU;
   6427
   6428    if (env->cpu_breakpoint[n]) {
   6429        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
   6430        env->cpu_breakpoint[n] = NULL;
   6431    }
   6432
   6433    if (!extract64(bcr, 0, 1)) {
    6434        /* E bit clear : breakpoint disabled */
   6435        return;
   6436    }
   6437
   6438    bt = extract64(bcr, 20, 4);
   6439
   6440    switch (bt) {
   6441    case 4: /* unlinked address mismatch (reserved if AArch64) */
   6442    case 5: /* linked address mismatch (reserved if AArch64) */
   6443        qemu_log_mask(LOG_UNIMP,
   6444                      "arm: address mismatch breakpoint types not implemented\n");
   6445        return;
   6446    case 0: /* unlinked address match */
   6447    case 1: /* linked address match */
   6448    {
   6449        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
   6450         * we behave as if the register was sign extended. Bits [1:0] are
   6451         * RES0. The BAS field is used to allow setting breakpoints on 16
   6452         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
   6453         * a bp will fire if the addresses covered by the bp and the addresses
   6454         * covered by the insn overlap but the insn doesn't start at the
   6455         * start of the bp address range. We choose to require the insn and
   6456         * the bp to have the same address. The constraints on writing to
   6457         * BAS enforced in dbgbcr_write mean we have only four cases:
   6458         *  0b0000  => no breakpoint
   6459         *  0b0011  => breakpoint on addr
   6460         *  0b1100  => breakpoint on addr + 2
   6461         *  0b1111  => breakpoint on addr
   6462         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
   6463         */
   6464        int bas = extract64(bcr, 5, 4);
   6465        addr = sextract64(bvr, 0, 49) & ~3ULL;
   6466        if (bas == 0) {
   6467            return;
   6468        }
   6469        if (bas == 0xc) {
   6470            addr += 2;
   6471        }
   6472        break;
   6473    }
   6474    case 2: /* unlinked context ID match */
   6475    case 8: /* unlinked VMID match (reserved if no EL2) */
   6476    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
   6477        qemu_log_mask(LOG_UNIMP,
   6478                      "arm: unlinked context breakpoint types not implemented\n");
   6479        return;
   6480    case 9: /* linked VMID match (reserved if no EL2) */
   6481    case 11: /* linked context ID and VMID match (reserved if no EL2) */
   6482    case 3: /* linked context ID match */
   6483    default:
   6484        /* We must generate no events for Linked context matches (unless
   6485         * they are linked to by some other bp/wp, which is handled in
   6486         * updates for the linking bp/wp). We choose to also generate no events
   6487         * for reserved values.
   6488         */
   6489        return;
   6490    }
   6491
   6492    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
   6493}
   6494
   6495void hw_breakpoint_update_all(ARMCPU *cpu)
   6496{
   6497    int i;
   6498    CPUARMState *env = &cpu->env;
   6499
   6500    /* Completely clear out existing QEMU breakpoints and our array, to
   6501     * avoid possible stale entries following migration load.
   6502     */
   6503    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
   6504    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
   6505
   6506    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
   6507        hw_breakpoint_update(cpu, i);
   6508    }
   6509}
   6510
   6511static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   6512                         uint64_t value)
   6513{
   6514    ARMCPU *cpu = env_archcpu(env);
   6515    int i = ri->crm;
   6516
   6517    raw_write(env, ri, value);
   6518    hw_breakpoint_update(cpu, i);
   6519}
   6520
   6521static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   6522                         uint64_t value)
   6523{
   6524    ARMCPU *cpu = env_archcpu(env);
   6525    int i = ri->crm;
   6526
   6527    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
   6528     * copy of BAS[0].
   6529     */
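            /* e.g. a guest write of BAS = 0b0110 is stored as BAS = 0b1100:
             * bit 6 (BAS[1]) takes the written bit 5 (BAS[0]), and bit 8
             * (BAS[3]) takes the written bit 7 (BAS[2]).
             */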
   6530    value = deposit64(value, 6, 1, extract64(value, 5, 1));
   6531    value = deposit64(value, 8, 1, extract64(value, 7, 1));
   6532
   6533    raw_write(env, ri, value);
   6534    hw_breakpoint_update(cpu, i);
   6535}
   6536
   6537static void define_debug_regs(ARMCPU *cpu)
   6538{
   6539    /* Define v7 and v8 architectural debug registers.
   6540     * These are just dummy implementations for now.
   6541     */
   6542    int i;
   6543    int wrps, brps, ctx_cmps;
   6544
   6545    /*
   6546     * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot
   6547     * use AArch32.  Given that bit 15 is RES1, if the value is 0 then
   6548     * the register must not exist for this cpu.
   6549     */
   6550    if (cpu->isar.dbgdidr != 0) {
   6551        ARMCPRegInfo dbgdidr = {
   6552            .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0,
   6553            .opc1 = 0, .opc2 = 0,
   6554            .access = PL0_R, .accessfn = access_tda,
   6555            .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
   6556        };
   6557        define_one_arm_cp_reg(cpu, &dbgdidr);
   6558    }
   6559
   6560    /* Note that all these register fields hold "number of Xs minus 1". */
   6561    brps = arm_num_brps(cpu);
   6562    wrps = arm_num_wrps(cpu);
   6563    ctx_cmps = arm_num_ctx_cmps(cpu);
   6564
   6565    assert(ctx_cmps <= brps);
   6566
   6567    define_arm_cp_regs(cpu, debug_cp_reginfo);
   6568
   6569    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
   6570        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
   6571    }
   6572
   6573    for (i = 0; i < brps; i++) {
   6574        ARMCPRegInfo dbgregs[] = {
   6575            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
   6576              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
   6577              .access = PL1_RW, .accessfn = access_tda,
   6578              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
   6579              .writefn = dbgbvr_write, .raw_writefn = raw_write
   6580            },
   6581            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
   6582              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
   6583              .access = PL1_RW, .accessfn = access_tda,
   6584              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
   6585              .writefn = dbgbcr_write, .raw_writefn = raw_write
   6586            },
   6587            REGINFO_SENTINEL
   6588        };
   6589        define_arm_cp_regs(cpu, dbgregs);
   6590    }
   6591
   6592    for (i = 0; i < wrps; i++) {
   6593        ARMCPRegInfo dbgregs[] = {
   6594            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
   6595              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
   6596              .access = PL1_RW, .accessfn = access_tda,
   6597              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
   6598              .writefn = dbgwvr_write, .raw_writefn = raw_write
   6599            },
   6600            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
   6601              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
   6602              .access = PL1_RW, .accessfn = access_tda,
   6603              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
   6604              .writefn = dbgwcr_write, .raw_writefn = raw_write
   6605            },
   6606            REGINFO_SENTINEL
   6607        };
   6608        define_arm_cp_regs(cpu, dbgregs);
   6609    }
   6610}
   6611
   6612static void define_pmu_regs(ARMCPU *cpu)
   6613{
   6614    /*
   6615     * v7 performance monitor control register: same implementor
   6616     * field as main ID register, and we implement four counters in
   6617     * addition to the cycle count register.
   6618     */
   6619    unsigned int i, pmcrn = PMCR_NUM_COUNTERS;
   6620    ARMCPRegInfo pmcr = {
   6621        .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
   6622        .access = PL0_RW,
   6623        .type = ARM_CP_IO | ARM_CP_ALIAS,
   6624        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
   6625        .accessfn = pmreg_access, .writefn = pmcr_write,
   6626        .raw_writefn = raw_write,
   6627    };
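            /* For pmcr64 below, the reset value packs the implementer code
             * from MIDR[31:24] into PMCR_EL0.IMP, the counter count into
             * PMCR_EL0.N and sets PMCR_EL0.LC (long cycle counter enable).
             */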
   6628    ARMCPRegInfo pmcr64 = {
   6629        .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
   6630        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
   6631        .access = PL0_RW, .accessfn = pmreg_access,
   6632        .type = ARM_CP_IO,
   6633        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
   6634        .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) |
   6635                      PMCRLC,
   6636        .writefn = pmcr_write, .raw_writefn = raw_write,
   6637    };
   6638    define_one_arm_cp_reg(cpu, &pmcr);
   6639    define_one_arm_cp_reg(cpu, &pmcr64);
   6640    for (i = 0; i < pmcrn; i++) {
   6641        char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
   6642        char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
   6643        char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
   6644        char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
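                /* PMEVCNTR<n>/PMEVTYPER<n> split n across the encoding:
                 * CRm[1:0] holds n[4:3] (the 8 | (3 & (i >> 3)) below) and
                 * opc2 holds n[2:0]; e.g. n = 11 gives CRm = 9, opc2 = 3.
                 */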
   6645        ARMCPRegInfo pmev_regs[] = {
   6646            { .name = pmevcntr_name, .cp = 15, .crn = 14,
   6647              .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
   6648              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
   6649              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
   6650              .accessfn = pmreg_access },
   6651            { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
   6652              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
   6653              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
   6654              .type = ARM_CP_IO,
   6655              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
   6656              .raw_readfn = pmevcntr_rawread,
   6657              .raw_writefn = pmevcntr_rawwrite },
   6658            { .name = pmevtyper_name, .cp = 15, .crn = 14,
   6659              .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
   6660              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
   6661              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
   6662              .accessfn = pmreg_access },
   6663            { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
   6664              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
   6665              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
   6666              .type = ARM_CP_IO,
   6667              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
   6668              .raw_writefn = pmevtyper_rawwrite },
   6669            REGINFO_SENTINEL
   6670        };
   6671        define_arm_cp_regs(cpu, pmev_regs);
   6672        g_free(pmevcntr_name);
   6673        g_free(pmevcntr_el0_name);
   6674        g_free(pmevtyper_name);
   6675        g_free(pmevtyper_el0_name);
   6676    }
   6677    if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
   6678        ARMCPRegInfo v81_pmu_regs[] = {
   6679            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
   6680              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
   6681              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
   6682              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
   6683            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
   6684              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
   6685              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
   6686              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
   6687            REGINFO_SENTINEL
   6688        };
   6689        define_arm_cp_regs(cpu, v81_pmu_regs);
   6690    }
   6691    if (cpu_isar_feature(any_pmu_8_4, cpu)) {
   6692        static const ARMCPRegInfo v84_pmmir = {
   6693            .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
   6694            .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
   6695            .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
   6696            .resetvalue = 0
   6697        };
   6698        define_one_arm_cp_reg(cpu, &v84_pmmir);
   6699    }
   6700}
   6701
   6702/* We don't know until after realize whether there's a GICv3
   6703 * attached, and that is what registers the gicv3 sysregs.
    6704 * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1 and
    6705 * ID_AA64PFR0_EL1 at runtime.
   6706 */
   6707static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
   6708{
   6709    ARMCPU *cpu = env_archcpu(env);
   6710    uint64_t pfr1 = cpu->isar.id_pfr1;
   6711
   6712    if (env->gicv3state) {
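                /* ID_PFR1.GIC, bits [31:28]: 0b0001 means the GICv3 system
                 * register interface is supported.
                 */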
   6713        pfr1 |= 1 << 28;
   6714    }
   6715    return pfr1;
   6716}
   6717
   6718#ifndef CONFIG_USER_ONLY
   6719static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
   6720{
   6721    ARMCPU *cpu = env_archcpu(env);
   6722    uint64_t pfr0 = cpu->isar.id_aa64pfr0;
   6723
   6724    if (env->gicv3state) {
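                /* ID_AA64PFR0_EL1.GIC, bits [27:24]: 0b0001 means the GICv3
                 * system register interface is supported.
                 */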
   6725        pfr0 |= 1 << 24;
   6726    }
   6727    return pfr0;
   6728}
   6729#endif
   6730
   6731/* Shared logic between LORID and the rest of the LOR* registers.
   6732 * Secure state exclusion has already been dealt with.
   6733 */
   6734static CPAccessResult access_lor_ns(CPUARMState *env,
   6735                                    const ARMCPRegInfo *ri, bool isread)
   6736{
   6737    int el = arm_current_el(env);
   6738
   6739    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
   6740        return CP_ACCESS_TRAP_EL2;
   6741    }
   6742    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
   6743        return CP_ACCESS_TRAP_EL3;
   6744    }
   6745    return CP_ACCESS_OK;
   6746}
   6747
   6748static CPAccessResult access_lor_other(CPUARMState *env,
   6749                                       const ARMCPRegInfo *ri, bool isread)
   6750{
   6751    if (arm_is_secure_below_el3(env)) {
   6752        /* Access denied in secure mode.  */
   6753        return CP_ACCESS_TRAP;
   6754    }
   6755    return access_lor_ns(env, ri, isread);
   6756}
   6757
   6758/*
   6759 * A trivial implementation of ARMv8.1-LOR leaves all of these
   6760 * registers fixed at 0, which indicates that there are zero
   6761 * supported Limited Ordering regions.
   6762 */
   6763static const ARMCPRegInfo lor_reginfo[] = {
   6764    { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
   6765      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
   6766      .access = PL1_RW, .accessfn = access_lor_other,
   6767      .type = ARM_CP_CONST, .resetvalue = 0 },
   6768    { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
   6769      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
   6770      .access = PL1_RW, .accessfn = access_lor_other,
   6771      .type = ARM_CP_CONST, .resetvalue = 0 },
   6772    { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
   6773      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
   6774      .access = PL1_RW, .accessfn = access_lor_other,
   6775      .type = ARM_CP_CONST, .resetvalue = 0 },
   6776    { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
   6777      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
   6778      .access = PL1_RW, .accessfn = access_lor_other,
   6779      .type = ARM_CP_CONST, .resetvalue = 0 },
   6780    { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
   6781      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
   6782      .access = PL1_R, .accessfn = access_lor_ns,
   6783      .type = ARM_CP_CONST, .resetvalue = 0 },
   6784    REGINFO_SENTINEL
   6785};
   6786
   6787#ifdef TARGET_AARCH64
   6788static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
   6789                                   bool isread)
   6790{
   6791    int el = arm_current_el(env);
   6792
   6793    if (el < 2 &&
   6794        arm_feature(env, ARM_FEATURE_EL2) &&
   6795        !(arm_hcr_el2_eff(env) & HCR_APK)) {
   6796        return CP_ACCESS_TRAP_EL2;
   6797    }
   6798    if (el < 3 &&
   6799        arm_feature(env, ARM_FEATURE_EL3) &&
   6800        !(env->cp15.scr_el3 & SCR_APK)) {
   6801        return CP_ACCESS_TRAP_EL3;
   6802    }
   6803    return CP_ACCESS_OK;
   6804}
   6805
   6806static const ARMCPRegInfo pauth_reginfo[] = {
   6807    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
   6808      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
   6809      .access = PL1_RW, .accessfn = access_pauth,
   6810      .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
   6811    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
   6812      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
   6813      .access = PL1_RW, .accessfn = access_pauth,
   6814      .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
   6815    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
   6816      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
   6817      .access = PL1_RW, .accessfn = access_pauth,
   6818      .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
   6819    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
   6820      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
   6821      .access = PL1_RW, .accessfn = access_pauth,
   6822      .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
   6823    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
   6824      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
   6825      .access = PL1_RW, .accessfn = access_pauth,
   6826      .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
   6827    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
   6828      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
   6829      .access = PL1_RW, .accessfn = access_pauth,
   6830      .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
   6831    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
   6832      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
   6833      .access = PL1_RW, .accessfn = access_pauth,
   6834      .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
   6835    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
   6836      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
   6837      .access = PL1_RW, .accessfn = access_pauth,
   6838      .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
   6839    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
   6840      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
   6841      .access = PL1_RW, .accessfn = access_pauth,
   6842      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
   6843    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
   6844      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
   6845      .access = PL1_RW, .accessfn = access_pauth,
   6846      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
   6847    REGINFO_SENTINEL
   6848};
   6849
   6850static const ARMCPRegInfo tlbirange_reginfo[] = {
   6851    { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
   6852      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
   6853      .access = PL1_W, .type = ARM_CP_NO_RAW,
   6854      .writefn = tlbi_aa64_rvae1is_write },
   6855    { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
   6856      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
   6857      .access = PL1_W, .type = ARM_CP_NO_RAW,
   6858      .writefn = tlbi_aa64_rvae1is_write },
    6859    { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
   6860      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
   6861      .access = PL1_W, .type = ARM_CP_NO_RAW,
   6862      .writefn = tlbi_aa64_rvae1is_write },
   6863    { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
   6864      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
   6865      .access = PL1_W, .type = ARM_CP_NO_RAW,
   6866      .writefn = tlbi_aa64_rvae1is_write },
   6867    { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
   6868      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
   6869      .access = PL1_W, .type = ARM_CP_NO_RAW,
   6870      .writefn = tlbi_aa64_rvae1is_write },
   6871    { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
   6872      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
   6873      .access = PL1_W, .type = ARM_CP_NO_RAW,
   6874      .writefn = tlbi_aa64_rvae1is_write },
    6875    { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
   6876      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
   6877      .access = PL1_W, .type = ARM_CP_NO_RAW,
   6878      .writefn = tlbi_aa64_rvae1is_write },
   6879    { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
   6880      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
   6881      .access = PL1_W, .type = ARM_CP_NO_RAW,
   6882      .writefn = tlbi_aa64_rvae1is_write },
   6883    { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
   6884      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
   6885      .access = PL1_W, .type = ARM_CP_NO_RAW,
   6886      .writefn = tlbi_aa64_rvae1_write },
   6887    { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
   6888      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
   6889      .access = PL1_W, .type = ARM_CP_NO_RAW,
   6890      .writefn = tlbi_aa64_rvae1_write },
    6891    { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
   6892      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
   6893      .access = PL1_W, .type = ARM_CP_NO_RAW,
   6894      .writefn = tlbi_aa64_rvae1_write },
   6895    { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
   6896      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
   6897      .access = PL1_W, .type = ARM_CP_NO_RAW,
   6898      .writefn = tlbi_aa64_rvae1_write },
   6899    { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
   6900      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
   6901      .access = PL2_W, .type = ARM_CP_NOP },
   6902    { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
   6903      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
   6904      .access = PL2_W, .type = ARM_CP_NOP },
   6905    { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
   6906      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
   6907      .access = PL2_W, .type = ARM_CP_NO_RAW,
   6908      .writefn = tlbi_aa64_rvae2is_write },
    6909    { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
   6910      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
   6911      .access = PL2_W, .type = ARM_CP_NO_RAW,
   6912      .writefn = tlbi_aa64_rvae2is_write },
   6913    { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
   6914      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
   6915      .access = PL2_W, .type = ARM_CP_NOP },
    6916    { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
   6917      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
   6918      .access = PL2_W, .type = ARM_CP_NOP },
    6919    { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
   6920      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
   6921      .access = PL2_W, .type = ARM_CP_NO_RAW,
   6922      .writefn = tlbi_aa64_rvae2is_write },
    6923    { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
   6924      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
   6925      .access = PL2_W, .type = ARM_CP_NO_RAW,
   6926      .writefn = tlbi_aa64_rvae2is_write },
   6927    { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
   6928      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
   6929      .access = PL2_W, .type = ARM_CP_NO_RAW,
   6930      .writefn = tlbi_aa64_rvae2_write },
    6931    { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
   6932      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
   6933      .access = PL2_W, .type = ARM_CP_NO_RAW,
   6934      .writefn = tlbi_aa64_rvae2_write },
    6935    { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
   6936      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
   6937      .access = PL3_W, .type = ARM_CP_NO_RAW,
   6938      .writefn = tlbi_aa64_rvae3is_write },
    6939    { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
   6940      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
   6941      .access = PL3_W, .type = ARM_CP_NO_RAW,
   6942      .writefn = tlbi_aa64_rvae3is_write },
    6943    { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
   6944      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
   6945      .access = PL3_W, .type = ARM_CP_NO_RAW,
   6946      .writefn = tlbi_aa64_rvae3is_write },
    6947    { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
   6948      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
   6949      .access = PL3_W, .type = ARM_CP_NO_RAW,
   6950      .writefn = tlbi_aa64_rvae3is_write },
    6951    { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
   6952      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
   6953      .access = PL3_W, .type = ARM_CP_NO_RAW,
   6954      .writefn = tlbi_aa64_rvae3_write },
    6955    { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
   6956      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
   6957      .access = PL3_W, .type = ARM_CP_NO_RAW,
   6958      .writefn = tlbi_aa64_rvae3_write },
   6959    REGINFO_SENTINEL
   6960};
   6961
   6962static const ARMCPRegInfo tlbios_reginfo[] = {
   6963    { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
   6964      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
   6965      .access = PL1_W, .type = ARM_CP_NO_RAW,
   6966      .writefn = tlbi_aa64_vmalle1is_write },
   6967    { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
   6968      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
   6969      .access = PL1_W, .type = ARM_CP_NO_RAW,
   6970      .writefn = tlbi_aa64_vmalle1is_write },
   6971    { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
   6972      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
   6973      .access = PL2_W, .type = ARM_CP_NO_RAW,
   6974      .writefn = tlbi_aa64_alle2is_write },
    6975    { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
   6976      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
   6977      .access = PL2_W, .type = ARM_CP_NO_RAW,
   6978      .writefn = tlbi_aa64_alle1is_write },
   6979    { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
   6980      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
   6981      .access = PL2_W, .type = ARM_CP_NO_RAW,
   6982      .writefn = tlbi_aa64_alle1is_write },
   6983    { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
   6984      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
   6985      .access = PL2_W, .type = ARM_CP_NOP },
   6986    { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
   6987      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
   6988      .access = PL2_W, .type = ARM_CP_NOP },
   6989    { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
   6990      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
   6991      .access = PL2_W, .type = ARM_CP_NOP },
   6992    { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
   6993      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
   6994      .access = PL2_W, .type = ARM_CP_NOP },
   6995    { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
   6996      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
   6997      .access = PL3_W, .type = ARM_CP_NO_RAW,
   6998      .writefn = tlbi_aa64_alle3is_write },
   6999    REGINFO_SENTINEL
   7000};
   7001
   7002static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
   7003{
   7004    Error *err = NULL;
   7005    uint64_t ret;
   7006
   7007    /* Success sets NZCV = 0000.  */
   7008    env->NF = env->CF = env->VF = 0, env->ZF = 1;
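            /* In QEMU's flag representation N is NF bit 31, C is CF bit 0,
             * V is VF bit 31, and Z is set iff ZF == 0; so ZF = 1 here
             * leaves the Z flag clear.
             */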
   7009
   7010    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
   7011        /*
   7012         * ??? Failed, for unknown reasons in the crypto subsystem.
   7013         * The best we can do is log the reason and return the
    7014         * timed-out indication to the guest.  We know of no reason
    7015         * to expect this failure to be transitory, so the
   7016         * guest may well hang retrying the operation.
   7017         */
    7018        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s\n",
   7019                      ri->name, error_get_pretty(err));
   7020        error_free(err);
   7021
    7022        env->ZF = 0; /* NZCV = 0100 */
   7023        return 0;
   7024    }
   7025    return ret;
   7026}
   7027
   7028/* We do not support re-seeding, so the two registers operate the same.  */
   7029static const ARMCPRegInfo rndr_reginfo[] = {
   7030    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
   7031      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
   7032      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
   7033      .access = PL0_R, .readfn = rndr_readfn },
   7034    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
   7035      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
   7036      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
   7037      .access = PL0_R, .readfn = rndr_readfn },
   7038    REGINFO_SENTINEL
   7039};
   7040
   7041#ifndef CONFIG_USER_ONLY
   7042static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
   7043                          uint64_t value)
   7044{
   7045    ARMCPU *cpu = env_archcpu(env);
   7046    /* CTR_EL0 System register -> DminLine, bits [19:16] */
   7047    uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
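            /* CTR_EL0.DminLine is log2(words) of the smallest data cache
             * line, so the size in bytes is 4 << DminLine; e.g. a DminLine
             * of 4 gives a 64-byte line.
             */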
   7048    uint64_t vaddr_in = (uint64_t) value;
   7049    uint64_t vaddr = vaddr_in & ~(dline_size - 1);
   7050    void *haddr;
   7051    int mem_idx = cpu_mmu_index(env, false);
   7052
   7053    /* This won't be crossing page boundaries */
   7054    haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
   7055    if (haddr) {
   7057        ram_addr_t offset;
   7058        MemoryRegion *mr;
   7059
   7060        /* RCU lock is already being held */
   7061        mr = memory_region_from_host(haddr, &offset);
   7062
   7063        if (mr) {
   7064            memory_region_writeback(mr, offset, dline_size);
   7065        }
   7066    }
   7067}
   7068
   7069static const ARMCPRegInfo dcpop_reg[] = {
   7070    { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
   7071      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
   7072      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
   7073      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
   7074    REGINFO_SENTINEL
   7075};
   7076
   7077static const ARMCPRegInfo dcpodp_reg[] = {
   7078    { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
   7079      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
   7080      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
   7081      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
   7082    REGINFO_SENTINEL
   7083};
   7084#endif /*CONFIG_USER_ONLY*/
   7085
   7086static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
   7087                                       bool isread)
   7088{
   7089    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
   7090        return CP_ACCESS_TRAP_EL2;
   7091    }
   7092
   7093    return CP_ACCESS_OK;
   7094}
   7095
   7096static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
   7097                                 bool isread)
   7098{
   7099    int el = arm_current_el(env);
   7100
   7101    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
   7102        uint64_t hcr = arm_hcr_el2_eff(env);
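                /* HCR_EL2.ATA gates access to the MTE registers; when
                 * HCR_EL2.{E2H,TGE} == {1,1} the EL2&0 regime is in use
                 * and this trap does not apply.
                 */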
   7103        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
   7104            return CP_ACCESS_TRAP_EL2;
   7105        }
   7106    }
   7107    if (el < 3 &&
   7108        arm_feature(env, ARM_FEATURE_EL3) &&
   7109        !(env->cp15.scr_el3 & SCR_ATA)) {
   7110        return CP_ACCESS_TRAP_EL3;
   7111    }
   7112    return CP_ACCESS_OK;
   7113}
   7114
   7115static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
   7116{
   7117    return env->pstate & PSTATE_TCO;
   7118}
   7119
   7120static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
   7121{
   7122    env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
   7123}
   7124
   7125static const ARMCPRegInfo mte_reginfo[] = {
   7126    { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
   7127      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
   7128      .access = PL1_RW, .accessfn = access_mte,
   7129      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
   7130    { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
   7131      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
   7132      .access = PL1_RW, .accessfn = access_mte,
   7133      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
   7134    { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
   7135      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
   7136      .access = PL2_RW, .accessfn = access_mte,
   7137      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
   7138    { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
   7139      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
   7140      .access = PL3_RW,
   7141      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
   7142    { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
   7143      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
   7144      .access = PL1_RW, .accessfn = access_mte,
   7145      .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
   7146    { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
   7147      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
   7148      .access = PL1_RW, .accessfn = access_mte,
   7149      .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
   7150    { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
   7151      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
   7152      .access = PL1_R, .accessfn = access_aa64_tid5,
   7153      .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
   7154    { .name = "TCO", .state = ARM_CP_STATE_AA64,
   7155      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
   7156      .type = ARM_CP_NO_RAW,
   7157      .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
   7158    { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
   7159      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
   7160      .type = ARM_CP_NOP, .access = PL1_W,
   7161      .accessfn = aa64_cacheop_poc_access },
   7162    { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
   7163      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
   7164      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   7165    { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
   7166      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
   7167      .type = ARM_CP_NOP, .access = PL1_W,
   7168      .accessfn = aa64_cacheop_poc_access },
   7169    { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
   7170      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
   7171      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   7172    { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
   7173      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
   7174      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   7175    { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
   7176      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
   7177      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   7178    { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
   7179      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
   7180      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   7181    { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
   7182      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
   7183      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   7184    REGINFO_SENTINEL
   7185};
   7186
   7187static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
   7188    { .name = "TCO", .state = ARM_CP_STATE_AA64,
   7189      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
   7190      .type = ARM_CP_CONST, .access = PL0_RW, },
   7191    REGINFO_SENTINEL
   7192};
   7193
   7194static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
   7195    { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
   7196      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
   7197      .type = ARM_CP_NOP, .access = PL0_W,
   7198      .accessfn = aa64_cacheop_poc_access },
   7199    { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
   7200      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
   7201      .type = ARM_CP_NOP, .access = PL0_W,
   7202      .accessfn = aa64_cacheop_poc_access },
   7203    { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
   7204      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
   7205      .type = ARM_CP_NOP, .access = PL0_W,
   7206      .accessfn = aa64_cacheop_poc_access },
   7207    { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
   7208      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
   7209      .type = ARM_CP_NOP, .access = PL0_W,
   7210      .accessfn = aa64_cacheop_poc_access },
   7211    { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
   7212      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
   7213      .type = ARM_CP_NOP, .access = PL0_W,
   7214      .accessfn = aa64_cacheop_poc_access },
   7215    { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
   7216      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
   7217      .type = ARM_CP_NOP, .access = PL0_W,
   7218      .accessfn = aa64_cacheop_poc_access },
   7219    { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
   7220      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
   7221      .type = ARM_CP_NOP, .access = PL0_W,
   7222      .accessfn = aa64_cacheop_poc_access },
   7223    { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
   7224      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
   7225      .type = ARM_CP_NOP, .access = PL0_W,
   7226      .accessfn = aa64_cacheop_poc_access },
   7227    { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
   7228      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
   7229      .access = PL0_W, .type = ARM_CP_DC_GVA,
   7230#ifndef CONFIG_USER_ONLY
   7231      /* Avoid overhead of an access check that always passes in user-mode */
   7232      .accessfn = aa64_zva_access,
   7233#endif
   7234    },
   7235    { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
   7236      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
   7237      .access = PL0_W, .type = ARM_CP_DC_GZVA,
   7238#ifndef CONFIG_USER_ONLY
   7239      /* Avoid overhead of an access check that always passes in user-mode */
   7240      .accessfn = aa64_zva_access,
   7241#endif
   7242    },
   7243    REGINFO_SENTINEL
   7244};
   7245
   7246#endif
   7247
   7248static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
   7249                                     bool isread)
   7250{
   7251    int el = arm_current_el(env);
   7252
   7253    if (el == 0) {
   7254        uint64_t sctlr = arm_sctlr(env, el);
   7255        if (!(sctlr & SCTLR_EnRCTX)) {
   7256            return CP_ACCESS_TRAP;
   7257        }
   7258    } else if (el == 1) {
   7259        uint64_t hcr = arm_hcr_el2_eff(env);
   7260        if (hcr & HCR_NV) {
   7261            return CP_ACCESS_TRAP_EL2;
   7262        }
   7263    }
   7264    return CP_ACCESS_OK;
   7265}
   7266
   7267static const ARMCPRegInfo predinv_reginfo[] = {
   7268    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
   7269      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
   7270      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
   7271    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
   7272      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
   7273      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
   7274    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
   7275      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
   7276      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
   7277    /*
   7278     * Note the AArch32 opcodes have a different OPC1.
   7279     */
   7280    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
   7281      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
   7282      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
   7283    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
   7284      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
   7285      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
   7286    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
   7287      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
   7288      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
   7289    REGINFO_SENTINEL
   7290};
   7291
   7292static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
   7293{
   7294    /* Read the high 32 bits of the current CCSIDR */
   7295    return extract64(ccsidr_read(env, ri), 32, 32);
   7296}
   7297
   7298static const ARMCPRegInfo ccsidr2_reginfo[] = {
   7299    { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
   7300      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
   7301      .access = PL1_R,
   7302      .accessfn = access_aa64_tid2,
   7303      .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
   7304    REGINFO_SENTINEL
   7305};
   7306
   7307static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
   7308                                       bool isread)
   7309{
   7310    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
   7311        return CP_ACCESS_TRAP_EL2;
   7312    }
   7313
   7314    return CP_ACCESS_OK;
   7315}
   7316
   7317static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
   7318                                       bool isread)
   7319{
   7320    if (arm_feature(env, ARM_FEATURE_V8)) {
   7321        return access_aa64_tid3(env, ri, isread);
   7322    }
   7323
   7324    return CP_ACCESS_OK;
   7325}
   7326
   7327static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
   7328                                     bool isread)
   7329{
   7330    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
   7331        return CP_ACCESS_TRAP_EL2;
   7332    }
   7333
   7334    return CP_ACCESS_OK;
   7335}
   7336
   7337static CPAccessResult access_joscr_jmcr(CPUARMState *env,
   7338                                        const ARMCPRegInfo *ri, bool isread)
   7339{
   7340    /*
   7341     * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
   7342     * in v7A, not in v8A.
   7343     */
   7344    if (!arm_feature(env, ARM_FEATURE_V8) &&
   7345        arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
   7346        (env->cp15.hstr_el2 & HSTR_TJDBX)) {
   7347        return CP_ACCESS_TRAP_EL2;
   7348    }
   7349    return CP_ACCESS_OK;
   7350}
   7351
   7352static const ARMCPRegInfo jazelle_regs[] = {
   7353    { .name = "JIDR",
   7354      .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
   7355      .access = PL1_R, .accessfn = access_jazelle,
   7356      .type = ARM_CP_CONST, .resetvalue = 0 },
   7357    { .name = "JOSCR",
   7358      .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
   7359      .accessfn = access_joscr_jmcr,
   7360      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   7361    { .name = "JMCR",
   7362      .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
   7363      .accessfn = access_joscr_jmcr,
   7364      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   7365    REGINFO_SENTINEL
   7366};
   7367
   7368static const ARMCPRegInfo vhe_reginfo[] = {
   7369    { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
   7370      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
   7371      .access = PL2_RW,
   7372      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) },
   7373    { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
   7374      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
   7375      .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
   7376      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
   7377#ifndef CONFIG_USER_ONLY
   7378    { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
   7379      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
   7380      .fieldoffset =
   7381        offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
   7382      .type = ARM_CP_IO, .access = PL2_RW,
   7383      .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
   7384    { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
   7385      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
   7386      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
   7387      .resetfn = gt_hv_timer_reset,
   7388      .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
   7389    { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
   7390      .type = ARM_CP_IO,
   7391      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
   7392      .access = PL2_RW,
   7393      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
   7394      .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
   7395    { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
   7396      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
   7397      .type = ARM_CP_IO | ARM_CP_ALIAS,
   7398      .access = PL2_RW, .accessfn = e2h_access,
   7399      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
   7400      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
   7401    { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
   7402      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
   7403      .type = ARM_CP_IO | ARM_CP_ALIAS,
   7404      .access = PL2_RW, .accessfn = e2h_access,
   7405      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
   7406      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
   7407    { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
   7408      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
   7409      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
   7410      .access = PL2_RW, .accessfn = e2h_access,
   7411      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
   7412    { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
   7413      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
   7414      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
   7415      .access = PL2_RW, .accessfn = e2h_access,
   7416      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
   7417    { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
   7418      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
   7419      .type = ARM_CP_IO | ARM_CP_ALIAS,
   7420      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
   7421      .access = PL2_RW, .accessfn = e2h_access,
   7422      .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
   7423    { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
   7424      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
   7425      .type = ARM_CP_IO | ARM_CP_ALIAS,
   7426      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
   7427      .access = PL2_RW, .accessfn = e2h_access,
   7428      .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
   7429#endif
   7430    REGINFO_SENTINEL
   7431};
   7432
   7433#ifndef CONFIG_USER_ONLY
   7434static const ARMCPRegInfo ats1e1_reginfo[] = {
   7435    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
   7436      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
   7437      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   7438      .writefn = ats_write64 },
   7439    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
   7440      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
   7441      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   7442      .writefn = ats_write64 },
   7443    REGINFO_SENTINEL
   7444};
   7445
   7446static const ARMCPRegInfo ats1cp_reginfo[] = {
   7447    { .name = "ATS1CPRP",
   7448      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
   7449      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   7450      .writefn = ats_write },
   7451    { .name = "ATS1CPWP",
   7452      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
   7453      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   7454      .writefn = ats_write },
   7455    REGINFO_SENTINEL
   7456};
   7457#endif
   7458
   7459/*
   7460 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
   7461 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
    7462 * is non-zero, which is never the case for ARMv7, is optional in
    7463 * ARMv8 and is mandatory from ARMv8.2 onwards.
   7464 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
   7465 * implementation is RAZ/WI we can ignore this detail, as we
   7466 * do for ACTLR.
   7467 */
   7468static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
   7469    { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
   7470      .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
   7471      .access = PL1_RW, .accessfn = access_tacr,
   7472      .type = ARM_CP_CONST, .resetvalue = 0 },
   7473    { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
   7474      .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
   7475      .access = PL2_RW, .type = ARM_CP_CONST,
   7476      .resetvalue = 0 },
   7477    REGINFO_SENTINEL
   7478};
   7479
   7480void register_cp_regs_for_features(ARMCPU *cpu)
   7481{
   7482    /* Register all the coprocessor registers based on feature bits */
   7483    CPUARMState *env = &cpu->env;
   7484    if (arm_feature(env, ARM_FEATURE_M)) {
   7485        /* M profile has no coprocessor registers */
   7486        return;
   7487    }
   7488
   7489    define_arm_cp_regs(cpu, cp_reginfo);
   7490    if (!arm_feature(env, ARM_FEATURE_V8)) {
   7491        /* Must go early as it is full of wildcards that may be
   7492         * overridden by later definitions.
   7493         */
   7494        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
   7495    }
   7496
   7497    if (arm_feature(env, ARM_FEATURE_V6)) {
   7498        /* The ID registers all have impdef reset values */
   7499        ARMCPRegInfo v6_idregs[] = {
   7500            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
   7501              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
   7502              .access = PL1_R, .type = ARM_CP_CONST,
   7503              .accessfn = access_aa32_tid3,
   7504              .resetvalue = cpu->isar.id_pfr0 },
   7505            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
   7506             * the value of the GIC field until after we define these regs.
   7507             */
   7508            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
   7509              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
   7510              .access = PL1_R, .type = ARM_CP_NO_RAW,
   7511              .accessfn = access_aa32_tid3,
   7512              .readfn = id_pfr1_read,
   7513              .writefn = arm_cp_write_ignore },
   7514            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
   7515              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
   7516              .access = PL1_R, .type = ARM_CP_CONST,
   7517              .accessfn = access_aa32_tid3,
   7518              .resetvalue = cpu->isar.id_dfr0 },
   7519            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
   7520              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
   7521              .access = PL1_R, .type = ARM_CP_CONST,
   7522              .accessfn = access_aa32_tid3,
   7523              .resetvalue = cpu->id_afr0 },
   7524            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
   7525              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
   7526              .access = PL1_R, .type = ARM_CP_CONST,
   7527              .accessfn = access_aa32_tid3,
   7528              .resetvalue = cpu->isar.id_mmfr0 },
   7529            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
   7530              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
   7531              .access = PL1_R, .type = ARM_CP_CONST,
   7532              .accessfn = access_aa32_tid3,
   7533              .resetvalue = cpu->isar.id_mmfr1 },
   7534            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
   7535              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
   7536              .access = PL1_R, .type = ARM_CP_CONST,
   7537              .accessfn = access_aa32_tid3,
   7538              .resetvalue = cpu->isar.id_mmfr2 },
   7539            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
   7540              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
   7541              .access = PL1_R, .type = ARM_CP_CONST,
   7542              .accessfn = access_aa32_tid3,
   7543              .resetvalue = cpu->isar.id_mmfr3 },
   7544            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
   7545              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
   7546              .access = PL1_R, .type = ARM_CP_CONST,
   7547              .accessfn = access_aa32_tid3,
   7548              .resetvalue = cpu->isar.id_isar0 },
   7549            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
   7550              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
   7551              .access = PL1_R, .type = ARM_CP_CONST,
   7552              .accessfn = access_aa32_tid3,
   7553              .resetvalue = cpu->isar.id_isar1 },
   7554            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
   7555              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
   7556              .access = PL1_R, .type = ARM_CP_CONST,
   7557              .accessfn = access_aa32_tid3,
   7558              .resetvalue = cpu->isar.id_isar2 },
   7559            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
   7560              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
   7561              .access = PL1_R, .type = ARM_CP_CONST,
   7562              .accessfn = access_aa32_tid3,
   7563              .resetvalue = cpu->isar.id_isar3 },
   7564            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
   7565              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
   7566              .access = PL1_R, .type = ARM_CP_CONST,
   7567              .accessfn = access_aa32_tid3,
   7568              .resetvalue = cpu->isar.id_isar4 },
   7569            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
   7570              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
   7571              .access = PL1_R, .type = ARM_CP_CONST,
   7572              .accessfn = access_aa32_tid3,
   7573              .resetvalue = cpu->isar.id_isar5 },
   7574            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
   7575              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
   7576              .access = PL1_R, .type = ARM_CP_CONST,
   7577              .accessfn = access_aa32_tid3,
   7578              .resetvalue = cpu->isar.id_mmfr4 },
   7579            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
   7580              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
   7581              .access = PL1_R, .type = ARM_CP_CONST,
   7582              .accessfn = access_aa32_tid3,
   7583              .resetvalue = cpu->isar.id_isar6 },
   7584            REGINFO_SENTINEL
   7585        };
   7586        define_arm_cp_regs(cpu, v6_idregs);
   7587        define_arm_cp_regs(cpu, v6_cp_reginfo);
   7588    } else {
   7589        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
   7590    }
   7591    if (arm_feature(env, ARM_FEATURE_V6K)) {
   7592        define_arm_cp_regs(cpu, v6k_cp_reginfo);
   7593    }
   7594    if (arm_feature(env, ARM_FEATURE_V7MP) &&
   7595        !arm_feature(env, ARM_FEATURE_PMSA)) {
   7596        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
   7597    }
   7598    if (arm_feature(env, ARM_FEATURE_V7VE)) {
   7599        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
   7600    }
   7601    if (arm_feature(env, ARM_FEATURE_V7)) {
   7602        ARMCPRegInfo clidr = {
   7603            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
   7604            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
   7605            .access = PL1_R, .type = ARM_CP_CONST,
   7606            .accessfn = access_aa64_tid2,
   7607            .resetvalue = cpu->clidr
   7608        };
   7609        define_one_arm_cp_reg(cpu, &clidr);
   7610        define_arm_cp_regs(cpu, v7_cp_reginfo);
   7611        define_debug_regs(cpu);
   7612        define_pmu_regs(cpu);
   7613    } else {
   7614        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
   7615    }
   7616    if (arm_feature(env, ARM_FEATURE_V8)) {
   7617        /* AArch64 ID registers, which all have impdef reset values.
   7618         * Note that within the ID register ranges the unused slots
    7619         * must all be RAZ, not UNDEF; future architecture versions may
   7620         * define new registers here.
   7621         */
   7622        ARMCPRegInfo v8_idregs[] = {
   7623            /*
   7624             * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
   7625             * emulation because we don't know the right value for the
   7626             * GIC field until after we define these regs.
   7627             */
   7628            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
   7629              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
   7630              .access = PL1_R,
   7631#ifdef CONFIG_USER_ONLY
   7632              .type = ARM_CP_CONST,
   7633              .resetvalue = cpu->isar.id_aa64pfr0
   7634#else
   7635              .type = ARM_CP_NO_RAW,
   7636              .accessfn = access_aa64_tid3,
   7637              .readfn = id_aa64pfr0_read,
   7638              .writefn = arm_cp_write_ignore
   7639#endif
   7640            },
   7641            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
   7642              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
   7643              .access = PL1_R, .type = ARM_CP_CONST,
   7644              .accessfn = access_aa64_tid3,
    7645              .resetvalue = cpu->isar.id_aa64pfr1 },
   7646            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7647              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
   7648              .access = PL1_R, .type = ARM_CP_CONST,
   7649              .accessfn = access_aa64_tid3,
   7650              .resetvalue = 0 },
   7651            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7652              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
   7653              .access = PL1_R, .type = ARM_CP_CONST,
   7654              .accessfn = access_aa64_tid3,
   7655              .resetvalue = 0 },
   7656            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
   7657              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
   7658              .access = PL1_R, .type = ARM_CP_CONST,
   7659              .accessfn = access_aa64_tid3,
   7660              .resetvalue = cpu->isar.id_aa64zfr0 },
   7661            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7662              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
   7663              .access = PL1_R, .type = ARM_CP_CONST,
   7664              .accessfn = access_aa64_tid3,
   7665              .resetvalue = 0 },
   7666            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7667              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
   7668              .access = PL1_R, .type = ARM_CP_CONST,
   7669              .accessfn = access_aa64_tid3,
   7670              .resetvalue = 0 },
   7671            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7672              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
   7673              .access = PL1_R, .type = ARM_CP_CONST,
   7674              .accessfn = access_aa64_tid3,
   7675              .resetvalue = 0 },
   7676            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
   7677              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
   7678              .access = PL1_R, .type = ARM_CP_CONST,
   7679              .accessfn = access_aa64_tid3,
   7680              .resetvalue = cpu->isar.id_aa64dfr0 },
   7681            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
   7682              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
   7683              .access = PL1_R, .type = ARM_CP_CONST,
   7684              .accessfn = access_aa64_tid3,
   7685              .resetvalue = cpu->isar.id_aa64dfr1 },
   7686            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7687              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
   7688              .access = PL1_R, .type = ARM_CP_CONST,
   7689              .accessfn = access_aa64_tid3,
   7690              .resetvalue = 0 },
   7691            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7692              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
   7693              .access = PL1_R, .type = ARM_CP_CONST,
   7694              .accessfn = access_aa64_tid3,
   7695              .resetvalue = 0 },
   7696            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
   7697              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
   7698              .access = PL1_R, .type = ARM_CP_CONST,
   7699              .accessfn = access_aa64_tid3,
   7700              .resetvalue = cpu->id_aa64afr0 },
   7701            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
   7702              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
   7703              .access = PL1_R, .type = ARM_CP_CONST,
   7704              .accessfn = access_aa64_tid3,
   7705              .resetvalue = cpu->id_aa64afr1 },
   7706            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7707              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
   7708              .access = PL1_R, .type = ARM_CP_CONST,
   7709              .accessfn = access_aa64_tid3,
   7710              .resetvalue = 0 },
   7711            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7712              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
   7713              .access = PL1_R, .type = ARM_CP_CONST,
   7714              .accessfn = access_aa64_tid3,
   7715              .resetvalue = 0 },
   7716            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
   7717              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
   7718              .access = PL1_R, .type = ARM_CP_CONST,
   7719              .accessfn = access_aa64_tid3,
   7720              .resetvalue = cpu->isar.id_aa64isar0 },
   7721            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
   7722              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
   7723              .access = PL1_R, .type = ARM_CP_CONST,
   7724              .accessfn = access_aa64_tid3,
   7725              .resetvalue = cpu->isar.id_aa64isar1 },
   7726            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7727              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
   7728              .access = PL1_R, .type = ARM_CP_CONST,
   7729              .accessfn = access_aa64_tid3,
   7730              .resetvalue = 0 },
   7731            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7732              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
   7733              .access = PL1_R, .type = ARM_CP_CONST,
   7734              .accessfn = access_aa64_tid3,
   7735              .resetvalue = 0 },
   7736            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7737              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
   7738              .access = PL1_R, .type = ARM_CP_CONST,
   7739              .accessfn = access_aa64_tid3,
   7740              .resetvalue = 0 },
   7741            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7742              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
   7743              .access = PL1_R, .type = ARM_CP_CONST,
   7744              .accessfn = access_aa64_tid3,
   7745              .resetvalue = 0 },
   7746            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7747              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
   7748              .access = PL1_R, .type = ARM_CP_CONST,
   7749              .accessfn = access_aa64_tid3,
   7750              .resetvalue = 0 },
   7751            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7752              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
   7753              .access = PL1_R, .type = ARM_CP_CONST,
   7754              .accessfn = access_aa64_tid3,
   7755              .resetvalue = 0 },
   7756            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
   7757              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
   7758              .access = PL1_R, .type = ARM_CP_CONST,
   7759              .accessfn = access_aa64_tid3,
   7760              .resetvalue = cpu->isar.id_aa64mmfr0 },
   7761            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
   7762              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
   7763              .access = PL1_R, .type = ARM_CP_CONST,
   7764              .accessfn = access_aa64_tid3,
   7765              .resetvalue = cpu->isar.id_aa64mmfr1 },
   7766            { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
   7767              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
   7768              .access = PL1_R, .type = ARM_CP_CONST,
   7769              .accessfn = access_aa64_tid3,
   7770              .resetvalue = cpu->isar.id_aa64mmfr2 },
   7771            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7772              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
   7773              .access = PL1_R, .type = ARM_CP_CONST,
   7774              .accessfn = access_aa64_tid3,
   7775              .resetvalue = 0 },
   7776            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7777              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
   7778              .access = PL1_R, .type = ARM_CP_CONST,
   7779              .accessfn = access_aa64_tid3,
   7780              .resetvalue = 0 },
   7781            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7782              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
   7783              .access = PL1_R, .type = ARM_CP_CONST,
   7784              .accessfn = access_aa64_tid3,
   7785              .resetvalue = 0 },
   7786            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7787              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
   7788              .access = PL1_R, .type = ARM_CP_CONST,
   7789              .accessfn = access_aa64_tid3,
   7790              .resetvalue = 0 },
   7791            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7792              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
   7793              .access = PL1_R, .type = ARM_CP_CONST,
   7794              .accessfn = access_aa64_tid3,
   7795              .resetvalue = 0 },
   7796            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
   7797              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
   7798              .access = PL1_R, .type = ARM_CP_CONST,
   7799              .accessfn = access_aa64_tid3,
   7800              .resetvalue = cpu->isar.mvfr0 },
   7801            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
   7802              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
   7803              .access = PL1_R, .type = ARM_CP_CONST,
   7804              .accessfn = access_aa64_tid3,
   7805              .resetvalue = cpu->isar.mvfr1 },
   7806            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
   7807              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
   7808              .access = PL1_R, .type = ARM_CP_CONST,
   7809              .accessfn = access_aa64_tid3,
   7810              .resetvalue = cpu->isar.mvfr2 },
   7811            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7812              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
   7813              .access = PL1_R, .type = ARM_CP_CONST,
   7814              .accessfn = access_aa64_tid3,
   7815              .resetvalue = 0 },
   7816            { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
   7817              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
   7818              .access = PL1_R, .type = ARM_CP_CONST,
   7819              .accessfn = access_aa64_tid3,
   7820              .resetvalue = cpu->isar.id_pfr2 },
   7821            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7822              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
   7823              .access = PL1_R, .type = ARM_CP_CONST,
   7824              .accessfn = access_aa64_tid3,
   7825              .resetvalue = 0 },
   7826            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7827              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
   7828              .access = PL1_R, .type = ARM_CP_CONST,
   7829              .accessfn = access_aa64_tid3,
   7830              .resetvalue = 0 },
   7831            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7832              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
   7833              .access = PL1_R, .type = ARM_CP_CONST,
   7834              .accessfn = access_aa64_tid3,
   7835              .resetvalue = 0 },
   7836            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
   7837              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
   7838              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
   7839              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
   7840            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
   7841              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
   7842              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
   7843              .resetvalue = cpu->pmceid0 },
   7844            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
   7845              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
   7846              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
   7847              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
   7848            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
   7849              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
   7850              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
   7851              .resetvalue = cpu->pmceid1 },
   7852            REGINFO_SENTINEL
   7853        };
   7854#ifdef CONFIG_USER_ONLY
   7855        ARMCPRegUserSpaceInfo v8_user_idregs[] = {
   7856            { .name = "ID_AA64PFR0_EL1",
   7857              .exported_bits = 0x000f000f00ff0000,
   7858              .fixed_bits    = 0x0000000000000011 },
   7859            { .name = "ID_AA64PFR1_EL1",
   7860              .exported_bits = 0x00000000000000f0 },
   7861            { .name = "ID_AA64PFR*_EL1_RESERVED",
   7862              .is_glob = true                     },
   7863            { .name = "ID_AA64ZFR0_EL1"           },
   7864            { .name = "ID_AA64MMFR0_EL1",
   7865              .fixed_bits    = 0x00000000ff000000 },
   7866            { .name = "ID_AA64MMFR1_EL1"          },
   7867            { .name = "ID_AA64MMFR*_EL1_RESERVED",
   7868              .is_glob = true                     },
   7869            { .name = "ID_AA64DFR0_EL1",
   7870              .fixed_bits    = 0x0000000000000006 },
   7871            { .name = "ID_AA64DFR1_EL1"           },
   7872            { .name = "ID_AA64DFR*_EL1_RESERVED",
   7873              .is_glob = true                     },
   7874            { .name = "ID_AA64AFR*",
   7875              .is_glob = true                     },
   7876            { .name = "ID_AA64ISAR0_EL1",
   7877              .exported_bits = 0x00fffffff0fffff0 },
   7878            { .name = "ID_AA64ISAR1_EL1",
   7879              .exported_bits = 0x000000f0ffffffff },
   7880            { .name = "ID_AA64ISAR*_EL1_RESERVED",
   7881              .is_glob = true                     },
   7882            REGUSERINFO_SENTINEL
   7883        };
   7884        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
   7885#endif
   7886        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
   7887        if (!arm_feature(env, ARM_FEATURE_EL3) &&
   7888            !arm_feature(env, ARM_FEATURE_EL2)) {
   7889            ARMCPRegInfo rvbar = {
   7890                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
   7891                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
   7892                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
   7893            };
   7894            define_one_arm_cp_reg(cpu, &rvbar);
   7895        }
   7896        define_arm_cp_regs(cpu, v8_idregs);
   7897        define_arm_cp_regs(cpu, v8_cp_reginfo);
   7898    }
   7899    if (arm_feature(env, ARM_FEATURE_EL2)) {
   7900        uint64_t vmpidr_def = mpidr_read_val(env);
   7901        ARMCPRegInfo vpidr_regs[] = {
   7902            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
   7903              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
   7904              .access = PL2_RW, .accessfn = access_el3_aa32ns,
   7905              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
   7906              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
   7907            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
   7908              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
   7909              .access = PL2_RW, .resetvalue = cpu->midr,
   7910              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
   7911            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
   7912              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
   7913              .access = PL2_RW, .accessfn = access_el3_aa32ns,
   7914              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
   7915              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
   7916            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
   7917              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
   7918              .access = PL2_RW,
   7919              .resetvalue = vmpidr_def,
   7920              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
   7921            REGINFO_SENTINEL
   7922        };
   7923        define_arm_cp_regs(cpu, vpidr_regs);
   7924        define_arm_cp_regs(cpu, el2_cp_reginfo);
   7925        if (arm_feature(env, ARM_FEATURE_V8)) {
   7926            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
   7927        }
   7928        if (cpu_isar_feature(aa64_sel2, cpu)) {
   7929            define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
   7930        }
   7931        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
   7932        if (!arm_feature(env, ARM_FEATURE_EL3)) {
   7933            ARMCPRegInfo rvbar = {
   7934                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
   7935                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
   7936                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
   7937            };
   7938            define_one_arm_cp_reg(cpu, &rvbar);
   7939        }
   7940    } else {
   7941        /* If EL2 is missing but higher ELs are enabled, we need to
   7942         * register the no_el2 reginfos.
   7943         */
   7944        if (arm_feature(env, ARM_FEATURE_EL3)) {
   7945            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
   7946             * of MIDR_EL1 and MPIDR_EL1.
   7947             */
   7948            ARMCPRegInfo vpidr_regs[] = {
   7949                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
   7950                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
   7951                  .access = PL2_RW, .accessfn = access_el3_aa32ns,
   7952                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
   7953                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
   7954                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
   7955                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
   7956                  .access = PL2_RW, .accessfn = access_el3_aa32ns,
   7957                  .type = ARM_CP_NO_RAW,
   7958                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
   7959                REGINFO_SENTINEL
   7960            };
   7961            define_arm_cp_regs(cpu, vpidr_regs);
   7962            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
   7963            if (arm_feature(env, ARM_FEATURE_V8)) {
   7964                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
   7965            }
   7966        }
   7967    }
   7968    if (arm_feature(env, ARM_FEATURE_EL3)) {
   7969        define_arm_cp_regs(cpu, el3_cp_reginfo);
   7970        ARMCPRegInfo el3_regs[] = {
   7971            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
   7972              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
   7973              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
   7974            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
   7975              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
   7976              .access = PL3_RW,
   7977              .raw_writefn = raw_write, .writefn = sctlr_write,
   7978              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
   7979              .resetvalue = cpu->reset_sctlr },
   7980            REGINFO_SENTINEL
   7981        };
   7982
   7983        define_arm_cp_regs(cpu, el3_regs);
   7984    }
   7985    /* The behaviour of NSACR is sufficiently various that we don't
   7986     * try to describe it in a single reginfo:
   7987     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
   7988     *     reads as constant 0xc00 from NS EL1 and NS EL2
   7989     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
   7990     *  if v7 without EL3, register doesn't exist
   7991     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
   7992     */
   7993    if (arm_feature(env, ARM_FEATURE_EL3)) {
   7994        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
   7995            ARMCPRegInfo nsacr = {
   7996                .name = "NSACR", .type = ARM_CP_CONST,
   7997                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
   7998                .access = PL1_RW, .accessfn = nsacr_access,
   7999                .resetvalue = 0xc00
   8000            };
   8001            define_one_arm_cp_reg(cpu, &nsacr);
   8002        } else {
   8003            ARMCPRegInfo nsacr = {
   8004                .name = "NSACR",
   8005                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
   8006                .access = PL3_RW | PL1_R,
   8007                .resetvalue = 0,
   8008                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
   8009            };
   8010            define_one_arm_cp_reg(cpu, &nsacr);
   8011        }
   8012    } else {
   8013        if (arm_feature(env, ARM_FEATURE_V8)) {
   8014            ARMCPRegInfo nsacr = {
   8015                .name = "NSACR", .type = ARM_CP_CONST,
   8016                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
   8017                .access = PL1_R,
   8018                .resetvalue = 0xc00
   8019            };
   8020            define_one_arm_cp_reg(cpu, &nsacr);
   8021        }
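                /* v7 without EL3: NSACR is simply not defined (see the
                 * behaviour summary above). */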
   8022    }
   8023
   8024    if (arm_feature(env, ARM_FEATURE_PMSA)) {
   8025        if (arm_feature(env, ARM_FEATURE_V6)) {
   8026            /* PMSAv6 not implemented */
   8027            assert(arm_feature(env, ARM_FEATURE_V7));
   8028            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
   8029            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
   8030        } else {
   8031            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
   8032        }
   8033    } else {
   8034        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
   8035        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
    8036        /* TTBCR2 is introduced with ARMv8.2-AA32HPD.  */
   8037        if (cpu_isar_feature(aa32_hpd, cpu)) {
   8038            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
   8039        }
   8040    }
   8041    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
   8042        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
   8043    }
   8044    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
   8045        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
   8046    }
   8047    if (arm_feature(env, ARM_FEATURE_VAPA)) {
   8048        define_arm_cp_regs(cpu, vapa_cp_reginfo);
   8049    }
   8050    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
   8051        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
   8052    }
   8053    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
   8054        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
   8055    }
   8056    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
   8057        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
   8058    }
   8059    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
   8060        define_arm_cp_regs(cpu, omap_cp_reginfo);
   8061    }
   8062    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
   8063        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
   8064    }
   8065    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
   8066        define_arm_cp_regs(cpu, xscale_cp_reginfo);
   8067    }
   8068    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
   8069        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
   8070    }
   8071    if (arm_feature(env, ARM_FEATURE_LPAE)) {
   8072        define_arm_cp_regs(cpu, lpae_cp_reginfo);
   8073    }
   8074    if (cpu_isar_feature(aa32_jazelle, cpu)) {
   8075        define_arm_cp_regs(cpu, jazelle_regs);
   8076    }
   8077    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
   8078     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
   8079     * be read-only (ie write causes UNDEF exception).
   8080     */
   8081    {
   8082        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
   8083            /* Pre-v8 MIDR space.
   8084             * Note that the MIDR isn't a simple constant register because
   8085             * of the TI925 behaviour where writes to another register can
   8086             * cause the MIDR value to change.
   8087             *
   8088             * Unimplemented registers in the c15 0 0 0 space default to
   8089             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
   8090             * and friends override accordingly.
   8091             */
   8092            { .name = "MIDR",
   8093              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
   8094              .access = PL1_R, .resetvalue = cpu->midr,
   8095              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
   8096              .readfn = midr_read,
   8097              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
   8098              .type = ARM_CP_OVERRIDE },
   8099            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
   8100            { .name = "DUMMY",
   8101              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
   8102              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
   8103            { .name = "DUMMY",
   8104              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
   8105              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
   8106            { .name = "DUMMY",
   8107              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
   8108              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
   8109            { .name = "DUMMY",
   8110              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
   8111              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
   8112            { .name = "DUMMY",
   8113              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
   8114              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
   8115            REGINFO_SENTINEL
   8116        };
   8117        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
   8118            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
   8119              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
   8120              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
   8121              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
   8122              .readfn = midr_read },
   8123            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
   8124            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
   8125              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
   8126              .access = PL1_R, .resetvalue = cpu->midr },
   8127            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
   8128              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
   8129              .access = PL1_R, .resetvalue = cpu->midr },
   8130            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
   8131              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
   8132              .access = PL1_R,
   8133              .accessfn = access_aa64_tid1,
   8134              .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
   8135            REGINFO_SENTINEL
   8136        };
   8137        ARMCPRegInfo id_cp_reginfo[] = {
   8138            /* These are common to v8 and pre-v8 */
   8139            { .name = "CTR",
   8140              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
   8141              .access = PL1_R, .accessfn = ctr_el0_access,
   8142              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
   8143            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
   8144              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
   8145              .access = PL0_R, .accessfn = ctr_el0_access,
   8146              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
   8147            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
   8148            { .name = "TCMTR",
   8149              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
   8150              .access = PL1_R,
   8151              .accessfn = access_aa32_tid1,
   8152              .type = ARM_CP_CONST, .resetvalue = 0 },
   8153            REGINFO_SENTINEL
   8154        };
   8155        /* TLBTR is specific to VMSA */
   8156        ARMCPRegInfo id_tlbtr_reginfo = {
   8157              .name = "TLBTR",
   8158              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
   8159              .access = PL1_R,
   8160              .accessfn = access_aa32_tid1,
   8161              .type = ARM_CP_CONST, .resetvalue = 0,
   8162        };
   8163        /* MPUIR is specific to PMSA V6+ */
   8164        ARMCPRegInfo id_mpuir_reginfo = {
   8165              .name = "MPUIR",
   8166              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
   8167              .access = PL1_R, .type = ARM_CP_CONST,
   8168              .resetvalue = cpu->pmsav7_dregion << 8
   8169        };
   8170        ARMCPRegInfo crn0_wi_reginfo = {
   8171            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
   8172            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
   8173            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
   8174        };
   8175#ifdef CONFIG_USER_ONLY
   8176        ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
   8177            { .name = "MIDR_EL1",
   8178              .exported_bits = 0x00000000ffffffff },
   8179            { .name = "REVIDR_EL1"                },
   8180            REGUSERINFO_SENTINEL
   8181        };
   8182        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
   8183#endif
   8184        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
   8185            arm_feature(env, ARM_FEATURE_STRONGARM)) {
   8186            ARMCPRegInfo *r;
   8187            /* Register the blanket "writes ignored" value first to cover the
   8188             * whole space. Then update the specific ID registers to allow write
   8189             * access, so that they ignore writes rather than causing them to
   8190             * UNDEF.
   8191             */
   8192            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
   8193            for (r = id_pre_v8_midr_cp_reginfo;
   8194                 r->type != ARM_CP_SENTINEL; r++) {
   8195                r->access = PL1_RW;
   8196            }
   8197            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
   8198                r->access = PL1_RW;
   8199            }
   8200            id_mpuir_reginfo.access = PL1_RW;
   8201            id_tlbtr_reginfo.access = PL1_RW;
   8202        }
   8203        if (arm_feature(env, ARM_FEATURE_V8)) {
   8204            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
   8205        } else {
   8206            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
   8207        }
   8208        define_arm_cp_regs(cpu, id_cp_reginfo);
   8209        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
   8210            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
   8211        } else if (arm_feature(env, ARM_FEATURE_V7)) {
   8212            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
   8213        }
   8214    }
   8215
   8216    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
   8217        ARMCPRegInfo mpidr_cp_reginfo[] = {
   8218            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
   8219              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
   8220              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
   8221            REGINFO_SENTINEL
   8222        };
   8223#ifdef CONFIG_USER_ONLY
   8224        ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
   8225            { .name = "MPIDR_EL1",
   8226              .fixed_bits = 0x0000000080000000 },
   8227            REGUSERINFO_SENTINEL
   8228        };
   8229        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
   8230#endif
   8231        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
   8232    }
   8233
   8234    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
   8235        ARMCPRegInfo auxcr_reginfo[] = {
   8236            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
   8237              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
   8238              .access = PL1_RW, .accessfn = access_tacr,
   8239              .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
   8240            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
   8241              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
   8242              .access = PL2_RW, .type = ARM_CP_CONST,
   8243              .resetvalue = 0 },
   8244            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
   8245              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
   8246              .access = PL3_RW, .type = ARM_CP_CONST,
   8247              .resetvalue = 0 },
   8248            REGINFO_SENTINEL
   8249        };
   8250        define_arm_cp_regs(cpu, auxcr_reginfo);
   8251        if (cpu_isar_feature(aa32_ac2, cpu)) {
   8252            define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
   8253        }
   8254    }
   8255
   8256    if (arm_feature(env, ARM_FEATURE_CBAR)) {
   8257        /*
   8258         * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
   8259         * There are two flavours:
   8260         *  (1) older 32-bit only cores have a simple 32-bit CBAR
   8261         *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
   8262         *      32-bit register visible to AArch32 at a different encoding
   8263         *      to the "flavour 1" register and with the bits rearranged to
   8264         *      be able to squash a 64-bit address into the 32-bit view.
   8265         * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
   8266         * in future if we support AArch32-only configs of some of the
   8267         * AArch64 cores we might need to add a specific feature flag
   8268         * to indicate cores with "flavour 2" CBAR.
   8269         */
   8270        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
   8271            /* 32 bit view is [31:18] 0...0 [43:32]. */
   8272            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
   8273                | extract64(cpu->reset_cbar, 32, 12);
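                    /*
                     * Worked example (illustrative, not from the original
                     * source): for reset_cbar = 0x0000000480040000,
                     * bits [43:32] are 0x004 and bits [31:18] are 0x2001,
                     * so cbar32 = (0x2001 << 18) | 0x004 = 0x80040004.
                     */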
   8274            ARMCPRegInfo cbar_reginfo[] = {
   8275                { .name = "CBAR",
   8276                  .type = ARM_CP_CONST,
   8277                  .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
   8278                  .access = PL1_R, .resetvalue = cbar32 },
   8279                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
   8280                  .type = ARM_CP_CONST,
   8281                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
   8282                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
   8283                REGINFO_SENTINEL
   8284            };
   8285            /* We don't implement a r/w 64 bit CBAR currently */
   8286            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
   8287            define_arm_cp_regs(cpu, cbar_reginfo);
   8288        } else {
   8289            ARMCPRegInfo cbar = {
   8290                .name = "CBAR",
   8291                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
   8292                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
   8293                .fieldoffset = offsetof(CPUARMState,
   8294                                        cp15.c15_config_base_address)
   8295            };
   8296            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
   8297                cbar.access = PL1_R;
   8298                cbar.fieldoffset = 0;
   8299                cbar.type = ARM_CP_CONST;
   8300            }
   8301            define_one_arm_cp_reg(cpu, &cbar);
   8302        }
   8303    }
   8304
   8305    if (arm_feature(env, ARM_FEATURE_VBAR)) {
   8306        ARMCPRegInfo vbar_cp_reginfo[] = {
   8307            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
   8308              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
   8309              .access = PL1_RW, .writefn = vbar_write,
   8310              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
   8311                                     offsetof(CPUARMState, cp15.vbar_ns) },
   8312              .resetvalue = 0 },
   8313            REGINFO_SENTINEL
   8314        };
   8315        define_arm_cp_regs(cpu, vbar_cp_reginfo);
   8316    }
   8317
   8318    /* Generic registers whose values depend on the implementation */
   8319    {
   8320        ARMCPRegInfo sctlr = {
   8321            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
   8322            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
   8323            .access = PL1_RW, .accessfn = access_tvm_trvm,
   8324            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
   8325                                   offsetof(CPUARMState, cp15.sctlr_ns) },
   8326            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
   8327            .raw_writefn = raw_write,
   8328        };
   8329        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
   8330            /* Normally we would always end the TB on an SCTLR write, but Linux
   8331             * arch/arm/mach-pxa/sleep.S expects two instructions following
   8332             * an MMU enable to execute from cache.  Imitate this behaviour.
   8333             */
   8334            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
   8335        }
   8336        define_one_arm_cp_reg(cpu, &sctlr);
   8337    }
   8338
   8339    if (cpu_isar_feature(aa64_lor, cpu)) {
   8340        define_arm_cp_regs(cpu, lor_reginfo);
   8341    }
   8342    if (cpu_isar_feature(aa64_pan, cpu)) {
   8343        define_one_arm_cp_reg(cpu, &pan_reginfo);
   8344    }
   8345#ifndef CONFIG_USER_ONLY
   8346    if (cpu_isar_feature(aa64_ats1e1, cpu)) {
   8347        define_arm_cp_regs(cpu, ats1e1_reginfo);
   8348    }
   8349    if (cpu_isar_feature(aa32_ats1e1, cpu)) {
   8350        define_arm_cp_regs(cpu, ats1cp_reginfo);
   8351    }
   8352#endif
   8353    if (cpu_isar_feature(aa64_uao, cpu)) {
   8354        define_one_arm_cp_reg(cpu, &uao_reginfo);
   8355    }
   8356
   8357    if (cpu_isar_feature(aa64_dit, cpu)) {
   8358        define_one_arm_cp_reg(cpu, &dit_reginfo);
   8359    }
   8360    if (cpu_isar_feature(aa64_ssbs, cpu)) {
   8361        define_one_arm_cp_reg(cpu, &ssbs_reginfo);
   8362    }
   8363
   8364    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
   8365        define_arm_cp_regs(cpu, vhe_reginfo);
   8366    }
   8367
   8368    if (cpu_isar_feature(aa64_sve, cpu)) {
   8369        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
   8370        if (arm_feature(env, ARM_FEATURE_EL2)) {
   8371            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
   8372        } else {
   8373            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
   8374        }
   8375        if (arm_feature(env, ARM_FEATURE_EL3)) {
   8376            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
   8377        }
   8378    }
   8379
   8380#ifdef TARGET_AARCH64
   8381    if (cpu_isar_feature(aa64_pauth, cpu)) {
   8382        define_arm_cp_regs(cpu, pauth_reginfo);
   8383    }
   8384    if (cpu_isar_feature(aa64_rndr, cpu)) {
   8385        define_arm_cp_regs(cpu, rndr_reginfo);
   8386    }
   8387    if (cpu_isar_feature(aa64_tlbirange, cpu)) {
   8388        define_arm_cp_regs(cpu, tlbirange_reginfo);
   8389    }
   8390    if (cpu_isar_feature(aa64_tlbios, cpu)) {
   8391        define_arm_cp_regs(cpu, tlbios_reginfo);
   8392    }
   8393#ifndef CONFIG_USER_ONLY
   8394    /* Data Cache clean instructions up to PoP */
   8395    if (cpu_isar_feature(aa64_dcpop, cpu)) {
   8396        define_one_arm_cp_reg(cpu, dcpop_reg);
   8397
   8398        if (cpu_isar_feature(aa64_dcpodp, cpu)) {
   8399            define_one_arm_cp_reg(cpu, dcpodp_reg);
   8400        }
   8401    }
   8402#endif /*CONFIG_USER_ONLY*/
   8403
   8404    /*
   8405     * If full MTE is enabled, add all of the system registers.
   8406     * If only "instructions available at EL0" are enabled,
   8407     * then define only a RAZ/WI version of PSTATE.TCO.
   8408     */
   8409    if (cpu_isar_feature(aa64_mte, cpu)) {
   8410        define_arm_cp_regs(cpu, mte_reginfo);
   8411        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
   8412    } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
   8413        define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
   8414        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
   8415    }
   8416#endif
   8417
   8418    if (cpu_isar_feature(any_predinv, cpu)) {
   8419        define_arm_cp_regs(cpu, predinv_reginfo);
   8420    }
   8421
   8422    if (cpu_isar_feature(any_ccidx, cpu)) {
   8423        define_arm_cp_regs(cpu, ccsidr2_reginfo);
   8424    }
   8425
   8426#ifndef CONFIG_USER_ONLY
   8427    /*
   8428     * Register redirections and aliases must be done last,
   8429     * after the registers from the other extensions have been defined.
   8430     */
   8431    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
   8432        define_arm_vh_e2h_redirects_aliases(cpu);
   8433    }
   8434#endif
   8435}
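
        /*
         * Illustrative sketch (not part of the original file): the typical
         * shape of a caller of the registration API used above. A CPU init
         * path that wanted to expose one IMPDEF constant register might do
         * so as follows; the name, encoding and reset value are invented
         * for the example.
         */
        static void __attribute__((unused))
        example_define_impdef_reg(ARMCPU *cpu)
        {
            ARMCPRegInfo example = {
                /* IMPDEF space: cp15, crn 15 (hypothetical encoding) */
                .name = "EXAMPLE_IMPDEF", .cp = 15,
                .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
                .access = PL1_R, .type = ARM_CP_CONST,
                .resetvalue = 0,
            };
            define_one_arm_cp_reg(cpu, &example);
        }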
   8436
   8437/* Sort alphabetically by type name, except for "any". */
   8438static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
   8439{
   8440    ObjectClass *class_a = (ObjectClass *)a;
   8441    ObjectClass *class_b = (ObjectClass *)b;
   8442    const char *name_a, *name_b;
   8443
   8444    name_a = object_class_get_name(class_a);
   8445    name_b = object_class_get_name(class_b);
   8446    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
   8447        return 1;
   8448    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
   8449        return -1;
   8450    } else {
   8451        return strcmp(name_a, name_b);
   8452    }
   8453}
   8454
   8455static void arm_cpu_list_entry(gpointer data, gpointer user_data)
   8456{
   8457    ObjectClass *oc = data;
   8458    const char *typename;
   8459    char *name;
   8460
   8461    typename = object_class_get_name(oc);
   8462    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
   8463    qemu_printf("  %s\n", name);
   8464    g_free(name);
   8465}
   8466
   8467void arm_cpu_list(void)
   8468{
   8469    GSList *list;
   8470
   8471    list = object_class_get_list(TYPE_ARM_CPU, false);
   8472    list = g_slist_sort(list, arm_cpu_list_compare);
   8473    qemu_printf("Available CPUs:\n");
   8474    g_slist_foreach(list, arm_cpu_list_entry, NULL);
   8475    g_slist_free(list);
   8476}
   8477
   8478static void arm_cpu_add_definition(gpointer data, gpointer user_data)
   8479{
   8480    ObjectClass *oc = data;
   8481    CpuDefinitionInfoList **cpu_list = user_data;
   8482    CpuDefinitionInfo *info;
   8483    const char *typename;
   8484
   8485    typename = object_class_get_name(oc);
   8486    info = g_malloc0(sizeof(*info));
   8487    info->name = g_strndup(typename,
   8488                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
   8489    info->q_typename = g_strdup(typename);
   8490
   8491    QAPI_LIST_PREPEND(*cpu_list, info);
   8492}
   8493
   8494CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
   8495{
   8496    CpuDefinitionInfoList *cpu_list = NULL;
   8497    GSList *list;
   8498
   8499    list = object_class_get_list(TYPE_ARM_CPU, false);
   8500    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
   8501    g_slist_free(list);
   8502
   8503    return cpu_list;
   8504}
   8505
   8506static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
   8507                                   void *opaque, int state, int secstate,
   8508                                   int crm, int opc1, int opc2,
   8509                                   const char *name)
   8510{
   8511    /* Private utility function for define_one_arm_cp_reg_with_opaque():
   8512     * add a single reginfo struct to the hash table.
   8513     */
   8514    uint32_t *key = g_new(uint32_t, 1);
   8515    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
   8516    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
   8517    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
   8518
   8519    r2->name = g_strdup(name);
   8520    /* Reset the secure state to the specific incoming state.  This is
   8521     * necessary as the register may have been defined with both states.
   8522     */
   8523    r2->secure = secstate;
   8524
   8525    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
   8526        /* Register is banked (using both entries in array).
   8527         * Overwriting fieldoffset as the array is only used to define
   8528         * banked registers but later only fieldoffset is used.
   8529         */
   8530        r2->fieldoffset = r->bank_fieldoffsets[ns];
   8531    }
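            /* (The VBAR definition in register_cp_regs_for_features() above
             * is one such banked register: it supplies { vbar_s, vbar_ns }
             * and relies on this collapse to a single fieldoffset.)
             */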
   8532
   8533    if (state == ARM_CP_STATE_AA32) {
   8534        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
   8535            /* If the register is banked then we don't need to migrate or
   8536             * reset the 32-bit instance in certain cases:
   8537             *
   8538             * 1) If the register has both 32-bit and 64-bit instances then we
   8539             *    can count on the 64-bit instance taking care of the
   8540             *    non-secure bank.
   8541             * 2) If ARMv8 is enabled then we can count on a 64-bit version
   8542             *    taking care of the secure bank.  This requires that separate
   8543             *    32 and 64-bit definitions are provided.
   8544             */
   8545            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
   8546                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
   8547                r2->type |= ARM_CP_ALIAS;
   8548            }
   8549        } else if ((secstate != r->secure) && !ns) {
   8550            /* The register is not banked so we only want to allow migration of
   8551             * the non-secure instance.
   8552             */
   8553            r2->type |= ARM_CP_ALIAS;
   8554        }
   8555
   8556        if (r->state == ARM_CP_STATE_BOTH) {
   8557            /* We assume it is a cp15 register if the .cp field is left unset.
   8558             */
   8559            if (r2->cp == 0) {
   8560                r2->cp = 15;
   8561            }
   8562
   8563#ifdef HOST_WORDS_BIGENDIAN
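                    /* The AArch32 view sees only the low 32 bits of the
                     * 64-bit state field; on a big-endian host those live
                     * at offset +4.
                     */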
   8564            if (r2->fieldoffset) {
   8565                r2->fieldoffset += sizeof(uint32_t);
   8566            }
   8567#endif
   8568        }
   8569    }
   8570    if (state == ARM_CP_STATE_AA64) {
   8571        /* To allow abbreviation of ARMCPRegInfo
   8572         * definitions, we treat cp == 0 as equivalent to
   8573         * the value for "standard guest-visible sysreg".
   8574         * STATE_BOTH definitions are also always "standard
   8575         * sysreg" in their AArch64 view (the .cp value may
   8576         * be non-zero for the benefit of the AArch32 view).
   8577         */
   8578        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
   8579            r2->cp = CP_REG_ARM64_SYSREG_CP;
   8580        }
   8581        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
   8582                                  r2->opc0, opc1, opc2);
   8583    } else {
   8584        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
   8585    }
   8586    if (opaque) {
   8587        r2->opaque = opaque;
   8588    }
   8589    /* reginfo passed to helpers is correct for the actual access,
   8590     * and is never ARM_CP_STATE_BOTH:
   8591     */
   8592    r2->state = state;
   8593    /* Make sure reginfo passed to helpers for wildcarded regs
   8594     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
   8595     */
   8596    r2->crm = crm;
   8597    r2->opc1 = opc1;
   8598    r2->opc2 = opc2;
   8599    /* By convention, for wildcarded registers only the first
   8600     * entry is used for migration; the others are marked as
   8601     * ALIAS so we don't try to transfer the register
   8602     * multiple times. Special registers (ie NOP/WFI) are
   8603     * never migratable and not even raw-accessible.
   8604     */
   8605    if ((r->type & ARM_CP_SPECIAL)) {
   8606        r2->type |= ARM_CP_NO_RAW;
   8607    }
   8608    if (((r->crm == CP_ANY) && crm != 0) ||
   8609        ((r->opc1 == CP_ANY) && opc1 != 0) ||
   8610        ((r->opc2 == CP_ANY) && opc2 != 0)) {
   8611        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
   8612    }
   8613
   8614    /* Check that raw accesses are either forbidden or handled. Note that
   8615     * we can't assert this earlier because the setup of fieldoffset for
   8616     * banked registers has to be done first.
   8617     */
   8618    if (!(r2->type & ARM_CP_NO_RAW)) {
   8619        assert(!raw_accessors_invalid(r2));
   8620    }
   8621
   8622    /* Overriding of an existing definition must be explicitly
   8623     * requested.
   8624     */
   8625    if (!(r->type & ARM_CP_OVERRIDE)) {
   8626        ARMCPRegInfo *oldreg;
   8627        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
   8628        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
   8629            fprintf(stderr, "Register redefined: cp=%d %d bit "
   8630                    "crn=%d crm=%d opc1=%d opc2=%d, "
   8631                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
   8632                    r2->crn, r2->crm, r2->opc1, r2->opc2,
   8633                    oldreg->name, r2->name);
   8634            g_assert_not_reached();
   8635        }
   8636    }
   8637    g_hash_table_insert(cpu->cp_regs, key, r2);
   8638}
   8639
   8640
   8641void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
   8642                                       const ARMCPRegInfo *r, void *opaque)
   8643{
   8644    /* Define implementations of coprocessor registers.
   8645     * We store these in a hashtable because typically
    8646     * there are fewer than 150 registers in a space which
   8647     * is 16*16*16*8*8 = 262144 in size.
   8648     * Wildcarding is supported for the crm, opc1 and opc2 fields.
   8649     * If a register is defined twice then the second definition is
   8650     * used, so this can be used to define some generic registers and
   8651     * then override them with implementation specific variations.
   8652     * At least one of the original and the second definition should
   8653     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
   8654     * against accidental use.
   8655     *
   8656     * The state field defines whether the register is to be
   8657     * visible in the AArch32 or AArch64 execution state. If the
   8658     * state is set to ARM_CP_STATE_BOTH then we synthesise a
   8659     * reginfo structure for the AArch32 view, which sees the lower
   8660     * 32 bits of the 64 bit register.
   8661     *
   8662     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
   8663     * be wildcarded. AArch64 registers are always considered to be 64
   8664     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
   8665     * the register, if any.
   8666     */
   8667    int crm, opc1, opc2, state;
   8668    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
   8669    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
   8670    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
   8671    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
   8672    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
   8673    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
   8674    /* 64 bit registers have only CRm and Opc1 fields */
   8675    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
   8676    /* op0 only exists in the AArch64 encodings */
   8677    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
   8678    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
   8679    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
   8680    /*
   8681     * This API is only for Arm's system coprocessors (14 and 15) or
   8682     * (M-profile or v7A-and-earlier only) for implementation defined
   8683     * coprocessors in the range 0..7.  Our decode assumes this, since
   8684     * 8..13 can be used for other insns including VFP and Neon. See
   8685     * valid_cp() in translate.c.  Assert here that we haven't tried
   8686     * to use an invalid coprocessor number.
   8687     */
   8688    switch (r->state) {
   8689    case ARM_CP_STATE_BOTH:
   8690        /* 0 has a special meaning, but otherwise the same rules as AA32. */
   8691        if (r->cp == 0) {
   8692            break;
   8693        }
   8694        /* fall through */
   8695    case ARM_CP_STATE_AA32:
   8696        if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
   8697            !arm_feature(&cpu->env, ARM_FEATURE_M)) {
   8698            assert(r->cp >= 14 && r->cp <= 15);
   8699        } else {
   8700            assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
   8701        }
   8702        break;
   8703    case ARM_CP_STATE_AA64:
   8704        assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
   8705        break;
   8706    default:
   8707        g_assert_not_reached();
   8708    }
   8709    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
   8710     * encodes a minimum access level for the register. We roll this
   8711     * runtime check into our general permission check code, so check
   8712     * here that the reginfo's specified permissions are strict enough
   8713     * to encompass the generic architectural permission check.
   8714     */
   8715    if (r->state != ARM_CP_STATE_AA32) {
   8716        int mask = 0;
   8717        switch (r->opc1) {
   8718        case 0:
   8719            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
   8720            mask = PL0U_R | PL1_RW;
   8721            break;
   8722        case 1: case 2:
   8723            /* min_EL EL1 */
   8724            mask = PL1_RW;
   8725            break;
   8726        case 3:
   8727            /* min_EL EL0 */
   8728            mask = PL0_RW;
   8729            break;
   8730        case 4:
   8731        case 5:
   8732            /* min_EL EL2 */
   8733            mask = PL2_RW;
   8734            break;
   8735        case 6:
   8736            /* min_EL EL3 */
   8737            mask = PL3_RW;
   8738            break;
   8739        case 7:
   8740            /* min_EL EL1, secure mode only (we don't check the latter) */
   8741            mask = PL1_RW;
   8742            break;
   8743        default:
   8744            /* broken reginfo with out-of-range opc1 */
   8745            assert(false);
   8746            break;
   8747        }
   8748        /* assert our permissions are not too lax (stricter is fine) */
   8749        assert((r->access & ~mask) == 0);
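                /* Example (sketch): a reginfo with .opc1 = 3 (min_EL EL0)
                 * may declare .access = PL0_RW, but one with .opc1 = 4
                 * (min_EL EL2) declaring anything beyond PL2_RW would trip
                 * this assert.
                 */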
   8750    }
   8751
   8752    /* Check that the register definition has enough info to handle
   8753     * reads and writes if they are permitted.
   8754     */
   8755    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
   8756        if (r->access & PL3_R) {
   8757            assert((r->fieldoffset ||
   8758                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
   8759                   r->readfn);
   8760        }
   8761        if (r->access & PL3_W) {
   8762            assert((r->fieldoffset ||
   8763                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
   8764                   r->writefn);
   8765        }
   8766    }
   8767    /* Bad type field probably means missing sentinel at end of reg list */
   8768    assert(cptype_valid(r->type));
   8769    for (crm = crmmin; crm <= crmmax; crm++) {
   8770        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
   8771            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
   8772                for (state = ARM_CP_STATE_AA32;
   8773                     state <= ARM_CP_STATE_AA64; state++) {
   8774                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
   8775                        continue;
   8776                    }
   8777                    if (state == ARM_CP_STATE_AA32) {
   8778                        /* Under AArch32 CP registers can be common
   8779                         * (same for secure and non-secure world) or banked.
   8780                         */
   8781                        char *name;
   8782
   8783                        switch (r->secure) {
   8784                        case ARM_CP_SECSTATE_S:
   8785                        case ARM_CP_SECSTATE_NS:
   8786                            add_cpreg_to_hashtable(cpu, r, opaque, state,
   8787                                                   r->secure, crm, opc1, opc2,
   8788                                                   r->name);
   8789                            break;
   8790                        default:
   8791                            name = g_strdup_printf("%s_S", r->name);
   8792                            add_cpreg_to_hashtable(cpu, r, opaque, state,
   8793                                                   ARM_CP_SECSTATE_S,
   8794                                                   crm, opc1, opc2, name);
   8795                            g_free(name);
   8796                            add_cpreg_to_hashtable(cpu, r, opaque, state,
   8797                                                   ARM_CP_SECSTATE_NS,
   8798                                                   crm, opc1, opc2, r->name);
   8799                            break;
   8800                        }
   8801                    } else {
    8802                        /* AArch64 registers get mapped to the
    8803                         * non-secure instance of AArch32. */
   8804                        add_cpreg_to_hashtable(cpu, r, opaque, state,
   8805                                               ARM_CP_SECSTATE_NS,
   8806                                               crm, opc1, opc2, r->name);
   8807                    }
   8808                }
   8809            }
   8810        }
   8811    }
   8812}
   8813
   8814void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
   8815                                    const ARMCPRegInfo *regs, void *opaque)
   8816{
   8817    /* Define a whole list of registers */
   8818    const ARMCPRegInfo *r;
   8819    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
   8820        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
   8821    }
   8822}
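
        /*
         * Illustrative sketch only (DEMOREG and demo_cp_reginfo are
         * hypothetical names, not registers QEMU defines): a reglist is a
         * sentinel-terminated array, so a caller would typically write:
         *
         *     static const ARMCPRegInfo demo_cp_reginfo[] = {
         *         { .name = "DEMOREG", .cp = 15,
         *           .crn = 0, .crm = 1, .opc1 = 0, .opc2 = 0,
         *           .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
         *         REGINFO_SENTINEL
         *     };
         *     define_arm_cp_regs(cpu, demo_cp_reginfo);
         *
         * where define_arm_cp_regs() is the opaque-less wrapper around this
         * function and the loop above stops at the REGINFO_SENTINEL entry.
         */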
   8823
   8824/*
   8825 * Modify ARMCPRegInfo for access from userspace.
   8826 *
    8827 * This is a data-driven modification directed by
    8828 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST, since
    8829 * user space cannot alter any values, and dynamic values pertaining
    8830 * to execution state are hidden from the user-space view anyway.
   8831 */
   8832void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
   8833{
   8834    const ARMCPRegUserSpaceInfo *m;
   8835    ARMCPRegInfo *r;
   8836
   8837    for (m = mods; m->name; m++) {
   8838        GPatternSpec *pat = NULL;
   8839        if (m->is_glob) {
   8840            pat = g_pattern_spec_new(m->name);
   8841        }
   8842        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
   8843            if (pat && g_pattern_match_string(pat, r->name)) {
   8844                r->type = ARM_CP_CONST;
   8845                r->access = PL0U_R;
   8846                r->resetvalue = 0;
   8847                /* continue */
   8848            } else if (strcmp(r->name, m->name) == 0) {
   8849                r->type = ARM_CP_CONST;
   8850                r->access = PL0U_R;
   8851                r->resetvalue &= m->exported_bits;
   8852                r->resetvalue |= m->fixed_bits;
   8853                break;
   8854            }
   8855        }
   8856        if (pat) {
   8857            g_pattern_spec_free(pat);
   8858        }
   8859    }
   8860}
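
        /*
         * A minimal sketch of a mods list (the names here are hypothetical,
         * chosen only to illustrate the two match styles handled above):
         *
         *     static const ARMCPRegUserSpaceInfo demo_user_mods[] = {
         *         { .name = "DEMO_ID_REG", .exported_bits = 0x000000f0 },
         *         { .name = "DEMO_RESERVED*", .is_glob = true },
         *         REGUSERINFO_SENTINEL
         *     };
         *     modify_arm_cp_regs(demo_regs, demo_user_mods);
         *
         * An exact-name match keeps only .exported_bits (plus any
         * .fixed_bits) in the register's reset value; a glob match turns
         * the register into a constant that reads as zero.
         */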
   8861
   8862const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
   8863{
   8864    return g_hash_table_lookup(cpregs, &encoded_cp);
   8865}
   8866
   8867void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
   8868                         uint64_t value)
   8869{
   8870    /* Helper coprocessor write function for write-ignore registers */
   8871}
   8872
   8873uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
   8874{
    8875    /* Helper coprocessor read function for read-as-zero registers */
   8876    return 0;
   8877}
   8878
   8879void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
   8880{
   8881    /* Helper coprocessor reset function for do-nothing-on-reset registers */
   8882}
   8883
   8884static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
   8885{
   8886    /* Return true if it is not valid for us to switch to
   8887     * this CPU mode (ie all the UNPREDICTABLE cases in
   8888     * the ARM ARM CPSRWriteByInstr pseudocode).
   8889     */
   8890
   8891    /* Changes to or from Hyp via MSR and CPS are illegal. */
   8892    if (write_type == CPSRWriteByInstr &&
   8893        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
   8894         mode == ARM_CPU_MODE_HYP)) {
   8895        return 1;
   8896    }
   8897
   8898    switch (mode) {
   8899    case ARM_CPU_MODE_USR:
   8900        return 0;
   8901    case ARM_CPU_MODE_SYS:
   8902    case ARM_CPU_MODE_SVC:
   8903    case ARM_CPU_MODE_ABT:
   8904    case ARM_CPU_MODE_UND:
   8905    case ARM_CPU_MODE_IRQ:
   8906    case ARM_CPU_MODE_FIQ:
   8907        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
   8908         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
   8909         */
   8910        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
   8911         * and CPS are treated as illegal mode changes.
   8912         */
   8913        if (write_type == CPSRWriteByInstr &&
   8914            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
   8915            (arm_hcr_el2_eff(env) & HCR_TGE)) {
   8916            return 1;
   8917        }
   8918        return 0;
   8919    case ARM_CPU_MODE_HYP:
   8920        return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
   8921    case ARM_CPU_MODE_MON:
   8922        return arm_current_el(env) < 3;
   8923    default:
   8924        return 1;
   8925    }
   8926}
   8927
   8928uint32_t cpsr_read(CPUARMState *env)
   8929{
   8930    int ZF;
   8931    ZF = (env->ZF == 0);
   8932    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
   8933        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
   8934        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
   8935        | ((env->condexec_bits & 0xfc) << 8)
   8936        | (env->GE << 16) | (env->daif & CPSR_AIF);
   8937}
   8938
   8939void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
   8940                CPSRWriteType write_type)
   8941{
   8942    uint32_t changed_daif;
   8943    bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
   8944        (mask & (CPSR_M | CPSR_E | CPSR_IL));
   8945
   8946    if (mask & CPSR_NZCV) {
   8947        env->ZF = (~val) & CPSR_Z;
   8948        env->NF = val;
   8949        env->CF = (val >> 29) & 1;
   8950        env->VF = (val << 3) & 0x80000000;
   8951    }
   8952    if (mask & CPSR_Q)
   8953        env->QF = ((val & CPSR_Q) != 0);
   8954    if (mask & CPSR_T)
   8955        env->thumb = ((val & CPSR_T) != 0);
   8956    if (mask & CPSR_IT_0_1) {
   8957        env->condexec_bits &= ~3;
   8958        env->condexec_bits |= (val >> 25) & 3;
   8959    }
   8960    if (mask & CPSR_IT_2_7) {
   8961        env->condexec_bits &= 3;
   8962        env->condexec_bits |= (val >> 8) & 0xfc;
   8963    }
   8964    if (mask & CPSR_GE) {
   8965        env->GE = (val >> 16) & 0xf;
   8966    }
   8967
   8968    /* In a V7 implementation that includes the security extensions but does
   8969     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
   8970     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
   8971     * bits respectively.
   8972     *
   8973     * In a V8 implementation, it is permitted for privileged software to
   8974     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
   8975     */
   8976    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
   8977        arm_feature(env, ARM_FEATURE_EL3) &&
   8978        !arm_feature(env, ARM_FEATURE_EL2) &&
   8979        !arm_is_secure(env)) {
   8980
   8981        changed_daif = (env->daif ^ val) & mask;
   8982
   8983        if (changed_daif & CPSR_A) {
   8984            /* Check to see if we are allowed to change the masking of async
   8985             * abort exceptions from a non-secure state.
   8986             */
   8987            if (!(env->cp15.scr_el3 & SCR_AW)) {
   8988                qemu_log_mask(LOG_GUEST_ERROR,
   8989                              "Ignoring attempt to switch CPSR_A flag from "
   8990                              "non-secure world with SCR.AW bit clear\n");
   8991                mask &= ~CPSR_A;
   8992            }
   8993        }
   8994
   8995        if (changed_daif & CPSR_F) {
   8996            /* Check to see if we are allowed to change the masking of FIQ
   8997             * exceptions from a non-secure state.
   8998             */
   8999            if (!(env->cp15.scr_el3 & SCR_FW)) {
   9000                qemu_log_mask(LOG_GUEST_ERROR,
   9001                              "Ignoring attempt to switch CPSR_F flag from "
   9002                              "non-secure world with SCR.FW bit clear\n");
   9003                mask &= ~CPSR_F;
   9004            }
   9005
   9006            /* Check whether non-maskable FIQ (NMFI) support is enabled.
    9007             * If this bit is set, software is not allowed to mask
   9008             * FIQs, but is allowed to set CPSR_F to 0.
   9009             */
   9010            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
   9011                (val & CPSR_F)) {
   9012                qemu_log_mask(LOG_GUEST_ERROR,
   9013                              "Ignoring attempt to enable CPSR_F flag "
   9014                              "(non-maskable FIQ [NMFI] support enabled)\n");
   9015                mask &= ~CPSR_F;
   9016            }
   9017        }
   9018    }
   9019
   9020    env->daif &= ~(CPSR_AIF & mask);
   9021    env->daif |= val & CPSR_AIF & mask;
   9022
   9023    if (write_type != CPSRWriteRaw &&
   9024        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
   9025        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
   9026            /* Note that we can only get here in USR mode if this is a
   9027             * gdb stub write; for this case we follow the architectural
   9028             * behaviour for guest writes in USR mode of ignoring an attempt
   9029             * to switch mode. (Those are caught by translate.c for writes
   9030             * triggered by guest instructions.)
   9031             */
   9032            mask &= ~CPSR_M;
   9033        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
   9034            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
   9035             * v7, and has defined behaviour in v8:
   9036             *  + leave CPSR.M untouched
   9037             *  + allow changes to the other CPSR fields
   9038             *  + set PSTATE.IL
   9039             * For user changes via the GDB stub, we don't set PSTATE.IL,
   9040             * as this would be unnecessarily harsh for a user error.
   9041             */
   9042            mask &= ~CPSR_M;
   9043            if (write_type != CPSRWriteByGDBStub &&
   9044                arm_feature(env, ARM_FEATURE_V8)) {
   9045                mask |= CPSR_IL;
   9046                val |= CPSR_IL;
   9047            }
   9048            qemu_log_mask(LOG_GUEST_ERROR,
   9049                          "Illegal AArch32 mode switch attempt from %s to %s\n",
   9050                          aarch32_mode_name(env->uncached_cpsr),
   9051                          aarch32_mode_name(val));
   9052        } else {
   9053            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
   9054                          write_type == CPSRWriteExceptionReturn ?
   9055                          "Exception return from AArch32" :
   9056                          "AArch32 mode switch from",
   9057                          aarch32_mode_name(env->uncached_cpsr),
   9058                          aarch32_mode_name(val), env->regs[15]);
   9059            switch_mode(env, val & CPSR_M);
   9060        }
   9061    }
   9062    mask &= ~CACHED_CPSR_BITS;
   9063    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
   9064    if (rebuild_hflags) {
   9065        arm_rebuild_hflags(env);
   9066    }
   9067}
   9068
   9069/* Sign/zero extend */
   9070uint32_t HELPER(sxtb16)(uint32_t x)
   9071{
   9072    uint32_t res;
   9073    res = (uint16_t)(int8_t)x;
   9074    res |= (uint32_t)(int8_t)(x >> 16) << 16;
   9075    return res;
   9076}
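
        /*
         * For example (the value follows from the code above):
         * sxtb16(0x00ff0080) sign-extends bytes 0 and 2 into halfwords,
         * yielding 0xffffff80.
         */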
   9077
   9078static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
   9079{
   9080    /*
   9081     * Take a division-by-zero exception if necessary; otherwise return
   9082     * to get the usual non-trapping division behaviour (result of 0)
   9083     */
   9084    if (arm_feature(env, ARM_FEATURE_M)
   9085        && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
   9086        raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
   9087    }
   9088}
   9089
   9090uint32_t HELPER(uxtb16)(uint32_t x)
   9091{
   9092    uint32_t res;
   9093    res = (uint16_t)(uint8_t)x;
   9094    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
   9095    return res;
   9096}
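
        /*
         * For example: uxtb16(0x12345678) zero-extends bytes 0 and 2 into
         * halfwords, yielding 0x00340078.
         */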
   9097
   9098int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
   9099{
   9100    if (den == 0) {
   9101        handle_possible_div0_trap(env, GETPC());
   9102        return 0;
   9103    }
   9104    if (num == INT_MIN && den == -1) {
   9105        return INT_MIN;
   9106    }
   9107    return num / den;
   9108}
   9109
   9110uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
   9111{
   9112    if (den == 0) {
   9113        handle_possible_div0_trap(env, GETPC());
   9114        return 0;
   9115    }
   9116    return num / den;
   9117}
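
        /*
         * A few worked cases of the division semantics above:
         *
         *     helper_sdiv(env, INT_MIN, -1) == INT_MIN  (wraps, no trap)
         *     helper_sdiv(env, 7, -2)       == -3       (truncates toward zero)
         *     helper_udiv(env, 7, 2)        == 3
         *     helper_sdiv(env, n, 0)        == 0, unless this is an M-profile
         *         CPU with CCR.DIV_0_TRP set, in which case EXCP_DIVBYZERO
         *         is raised instead.
         *
         * (HELPER(x) expands to helper_x.)
         */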
   9118
   9119uint32_t HELPER(rbit)(uint32_t x)
   9120{
   9121    return revbit32(x);
   9122}
   9123
   9124#ifdef CONFIG_USER_ONLY
   9125
   9126static void switch_mode(CPUARMState *env, int mode)
   9127{
   9128    ARMCPU *cpu = env_archcpu(env);
   9129
   9130    if (mode != ARM_CPU_MODE_USR) {
   9131        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
   9132    }
   9133}
   9134
   9135uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
   9136                                 uint32_t cur_el, bool secure)
   9137{
   9138    return 1;
   9139}
   9140
   9141void aarch64_sync_64_to_32(CPUARMState *env)
   9142{
   9143    g_assert_not_reached();
   9144}
   9145
   9146#else
   9147
   9148static void switch_mode(CPUARMState *env, int mode)
   9149{
   9150    int old_mode;
   9151    int i;
   9152
   9153    old_mode = env->uncached_cpsr & CPSR_M;
   9154    if (mode == old_mode)
   9155        return;
   9156
   9157    if (old_mode == ARM_CPU_MODE_FIQ) {
    9158        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
    9159        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    9160    } else if (mode == ARM_CPU_MODE_FIQ) {
    9161        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
    9162        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
   9163    }
   9164
   9165    i = bank_number(old_mode);
   9166    env->banked_r13[i] = env->regs[13];
   9167    env->banked_spsr[i] = env->spsr;
   9168
   9169    i = bank_number(mode);
   9170    env->regs[13] = env->banked_r13[i];
   9171    env->spsr = env->banked_spsr[i];
   9172
   9173    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
   9174    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
   9175}
   9176
   9177/* Physical Interrupt Target EL Lookup Table
   9178 *
   9179 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
   9180 *
   9181 * The below multi-dimensional table is used for looking up the target
   9182 * exception level given numerous condition criteria.  Specifically, the
   9183 * target EL is based on SCR and HCR routing controls as well as the
   9184 * currently executing EL and secure state.
   9185 *
   9186 *    Dimensions:
   9187 *    target_el_table[2][2][2][2][2][4]
   9188 *                    |  |  |  |  |  +--- Current EL
   9189 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
   9190 *                    |  |  |  +--------- HCR mask override
   9191 *                    |  |  +------------ SCR exec state control
   9192 *                    |  +--------------- SCR mask override
   9193 *                    +------------------ 32-bit(0)/64-bit(1) EL3
   9194 *
   9195 *    The table values are as such:
   9196 *    0-3 = EL0-EL3
   9197 *     -1 = Cannot occur
   9198 *
   9199 * The ARM ARM target EL table includes entries indicating that an "exception
   9200 * is not taken".  The two cases where this is applicable are:
   9201 *    1) An exception is taken from EL3 but the SCR does not have the exception
   9202 *    routed to EL3.
   9203 *    2) An exception is taken from EL2 but the HCR does not have the exception
   9204 *    routed to EL2.
    9205 * In these two cases, the below table contains a target of EL1.  This value is
   9206 * returned as it is expected that the consumer of the table data will check
   9207 * for "target EL >= current EL" to ensure the exception is not taken.
   9208 *
   9209 *            SCR     HCR
   9210 *         64  EA     AMO                 From
   9211 *        BIT IRQ     IMO      Non-secure         Secure
   9212 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
   9213 */
   9214static const int8_t target_el_table[2][2][2][2][2][4] = {
   9215    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
   9216       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
   9217      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
   9218       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
   9219     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
   9220       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
   9221      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
   9222       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
   9223    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
   9224       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 2,  2, -1,  1 },},},
   9225      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1,  1,  1 },},
   9226       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 2,  2,  2,  1 },},},},
   9227     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
   9228       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
   9229      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},
   9230       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},},},},
   9231};
   9232
   9233/*
   9234 * Determine the target EL for physical exceptions
   9235 */
   9236uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
   9237                                 uint32_t cur_el, bool secure)
   9238{
   9239    CPUARMState *env = cs->env_ptr;
   9240    bool rw;
   9241    bool scr;
   9242    bool hcr;
   9243    int target_el;
   9244    /* Is the highest EL AArch64? */
   9245    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
   9246    uint64_t hcr_el2;
   9247
   9248    if (arm_feature(env, ARM_FEATURE_EL3)) {
   9249        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
   9250    } else {
   9251        /* Either EL2 is the highest EL (and so the EL2 register width
   9252         * is given by is64); or there is no EL2 or EL3, in which case
   9253         * the value of 'rw' does not affect the table lookup anyway.
   9254         */
   9255        rw = is64;
   9256    }
   9257
   9258    hcr_el2 = arm_hcr_el2_eff(env);
   9259    switch (excp_idx) {
   9260    case EXCP_IRQ:
   9261        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
   9262        hcr = hcr_el2 & HCR_IMO;
   9263        break;
   9264    case EXCP_FIQ:
   9265        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
   9266        hcr = hcr_el2 & HCR_FMO;
   9267        break;
   9268    default:
   9269        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
   9270        hcr = hcr_el2 & HCR_AMO;
   9271        break;
    9272    }
   9273
   9274    /*
   9275     * For these purposes, TGE and AMO/IMO/FMO both force the
   9276     * interrupt to EL2.  Fold TGE into the bit extracted above.
   9277     */
   9278    hcr |= (hcr_el2 & HCR_TGE) != 0;
   9279
   9280    /* Perform a table-lookup for the target EL given the current state */
   9281    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
   9282
   9283    assert(target_el > 0);
   9284
   9285    return target_el;
   9286}
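
        /*
         * Worked example of the lookup above: an IRQ taken from non-secure
         * EL0 with an AArch64 EL3 (is64 = 1), SCR_EL3.IRQ = 0, SCR_EL3.RW = 1
         * and HCR_EL2.IMO = 1 indexes target_el_table[1][0][1][1][0][0],
         * which is 2, so the interrupt is routed to EL2.
         */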
   9287
   9288void arm_log_exception(int idx)
   9289{
   9290    if (qemu_loglevel_mask(CPU_LOG_INT)) {
   9291        const char *exc = NULL;
   9292        static const char * const excnames[] = {
   9293            [EXCP_UDEF] = "Undefined Instruction",
   9294            [EXCP_SWI] = "SVC",
   9295            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
   9296            [EXCP_DATA_ABORT] = "Data Abort",
   9297            [EXCP_IRQ] = "IRQ",
   9298            [EXCP_FIQ] = "FIQ",
   9299            [EXCP_BKPT] = "Breakpoint",
   9300            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
   9301            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
   9302            [EXCP_HVC] = "Hypervisor Call",
   9303            [EXCP_HYP_TRAP] = "Hypervisor Trap",
   9304            [EXCP_SMC] = "Secure Monitor Call",
   9305            [EXCP_VIRQ] = "Virtual IRQ",
   9306            [EXCP_VFIQ] = "Virtual FIQ",
   9307            [EXCP_SEMIHOST] = "Semihosting call",
   9308            [EXCP_NOCP] = "v7M NOCP UsageFault",
   9309            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
   9310            [EXCP_STKOF] = "v8M STKOF UsageFault",
   9311            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
   9312            [EXCP_LSERR] = "v8M LSERR UsageFault",
   9313            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
   9314            [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
   9315        };
   9316
   9317        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
   9318            exc = excnames[idx];
   9319        }
   9320        if (!exc) {
   9321            exc = "unknown";
   9322        }
   9323        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
   9324    }
   9325}
   9326
   9327/*
   9328 * Function used to synchronize QEMU's AArch64 register set with AArch32
   9329 * register set.  This is necessary when switching between AArch32 and AArch64
   9330 * execution state.
   9331 */
   9332void aarch64_sync_32_to_64(CPUARMState *env)
   9333{
   9334    int i;
   9335    uint32_t mode = env->uncached_cpsr & CPSR_M;
   9336
   9337    /* We can blanket copy R[0:7] to X[0:7] */
   9338    for (i = 0; i < 8; i++) {
   9339        env->xregs[i] = env->regs[i];
   9340    }
   9341
   9342    /*
   9343     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
   9344     * Otherwise, they come from the banked user regs.
   9345     */
   9346    if (mode == ARM_CPU_MODE_FIQ) {
   9347        for (i = 8; i < 13; i++) {
   9348            env->xregs[i] = env->usr_regs[i - 8];
   9349        }
   9350    } else {
   9351        for (i = 8; i < 13; i++) {
   9352            env->xregs[i] = env->regs[i];
   9353        }
   9354    }
   9355
   9356    /*
    9357     * Registers x13-x23 are the various mode SP and LR registers. Registers
   9358     * r13 and r14 are only copied if we are in that mode, otherwise we copy
   9359     * from the mode banked register.
   9360     */
   9361    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
   9362        env->xregs[13] = env->regs[13];
   9363        env->xregs[14] = env->regs[14];
   9364    } else {
   9365        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
   9366        /* HYP is an exception in that it is copied from r14 */
   9367        if (mode == ARM_CPU_MODE_HYP) {
   9368            env->xregs[14] = env->regs[14];
   9369        } else {
   9370            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
   9371        }
   9372    }
   9373
   9374    if (mode == ARM_CPU_MODE_HYP) {
   9375        env->xregs[15] = env->regs[13];
   9376    } else {
   9377        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
   9378    }
   9379
   9380    if (mode == ARM_CPU_MODE_IRQ) {
   9381        env->xregs[16] = env->regs[14];
   9382        env->xregs[17] = env->regs[13];
   9383    } else {
   9384        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
   9385        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
   9386    }
   9387
   9388    if (mode == ARM_CPU_MODE_SVC) {
   9389        env->xregs[18] = env->regs[14];
   9390        env->xregs[19] = env->regs[13];
   9391    } else {
   9392        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
   9393        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
   9394    }
   9395
   9396    if (mode == ARM_CPU_MODE_ABT) {
   9397        env->xregs[20] = env->regs[14];
   9398        env->xregs[21] = env->regs[13];
   9399    } else {
   9400        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
   9401        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
   9402    }
   9403
   9404    if (mode == ARM_CPU_MODE_UND) {
   9405        env->xregs[22] = env->regs[14];
   9406        env->xregs[23] = env->regs[13];
   9407    } else {
   9408        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
   9409        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
   9410    }
   9411
   9412    /*
   9413     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
   9414     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
   9415     * FIQ bank for r8-r14.
   9416     */
   9417    if (mode == ARM_CPU_MODE_FIQ) {
   9418        for (i = 24; i < 31; i++) {
   9419            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
   9420        }
   9421    } else {
   9422        for (i = 24; i < 29; i++) {
   9423            env->xregs[i] = env->fiq_regs[i - 24];
   9424        }
   9425        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
   9426        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
   9427    }
   9428
   9429    env->pc = env->regs[15];
   9430}
   9431
   9432/*
   9433 * Function used to synchronize QEMU's AArch32 register set with AArch64
   9434 * register set.  This is necessary when switching between AArch32 and AArch64
   9435 * execution state.
   9436 */
   9437void aarch64_sync_64_to_32(CPUARMState *env)
   9438{
   9439    int i;
   9440    uint32_t mode = env->uncached_cpsr & CPSR_M;
   9441
   9442    /* We can blanket copy X[0:7] to R[0:7] */
   9443    for (i = 0; i < 8; i++) {
   9444        env->regs[i] = env->xregs[i];
   9445    }
   9446
   9447    /*
   9448     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
   9449     * Otherwise, we copy x8-x12 into the banked user regs.
   9450     */
   9451    if (mode == ARM_CPU_MODE_FIQ) {
   9452        for (i = 8; i < 13; i++) {
   9453            env->usr_regs[i - 8] = env->xregs[i];
   9454        }
   9455    } else {
   9456        for (i = 8; i < 13; i++) {
   9457            env->regs[i] = env->xregs[i];
   9458        }
   9459    }
   9460
   9461    /*
   9462     * Registers r13 & r14 depend on the current mode.
   9463     * If we are in a given mode, we copy the corresponding x registers to r13
   9464     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
   9465     * for the mode.
   9466     */
   9467    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
   9468        env->regs[13] = env->xregs[13];
   9469        env->regs[14] = env->xregs[14];
   9470    } else {
   9471        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
   9472
   9473        /*
   9474         * HYP is an exception in that it does not have its own banked r14 but
   9475         * shares the USR r14
   9476         */
   9477        if (mode == ARM_CPU_MODE_HYP) {
   9478            env->regs[14] = env->xregs[14];
   9479        } else {
   9480            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
   9481        }
   9482    }
   9483
   9484    if (mode == ARM_CPU_MODE_HYP) {
   9485        env->regs[13] = env->xregs[15];
   9486    } else {
   9487        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
   9488    }
   9489
   9490    if (mode == ARM_CPU_MODE_IRQ) {
   9491        env->regs[14] = env->xregs[16];
   9492        env->regs[13] = env->xregs[17];
   9493    } else {
   9494        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
   9495        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
   9496    }
   9497
   9498    if (mode == ARM_CPU_MODE_SVC) {
   9499        env->regs[14] = env->xregs[18];
   9500        env->regs[13] = env->xregs[19];
   9501    } else {
   9502        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
   9503        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
   9504    }
   9505
   9506    if (mode == ARM_CPU_MODE_ABT) {
   9507        env->regs[14] = env->xregs[20];
   9508        env->regs[13] = env->xregs[21];
   9509    } else {
   9510        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
   9511        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
   9512    }
   9513
   9514    if (mode == ARM_CPU_MODE_UND) {
   9515        env->regs[14] = env->xregs[22];
   9516        env->regs[13] = env->xregs[23];
   9517    } else {
   9518        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
   9519        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
   9520    }
   9521
   9522    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
   9523     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
   9524     * FIQ bank for r8-r14.
   9525     */
   9526    if (mode == ARM_CPU_MODE_FIQ) {
   9527        for (i = 24; i < 31; i++) {
   9528            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
   9529        }
   9530    } else {
   9531        for (i = 24; i < 29; i++) {
   9532            env->fiq_regs[i - 24] = env->xregs[i];
   9533        }
   9534        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
   9535        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
   9536    }
   9537
   9538    env->regs[15] = env->pc;
   9539}
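
        /*
         * Summary of the fixed xregs <-> AArch32 mapping implemented by the
         * two sync functions above (see also aarch64_regnum() below):
         *
         *     x13 = SP_usr    x14 = LR_usr    x15 = SP_hyp
         *     x16 = LR_irq    x17 = SP_irq
         *     x18 = LR_svc    x19 = SP_svc
         *     x20 = LR_abt    x21 = SP_abt
         *     x22 = LR_und    x23 = SP_und
         *     x24..x30 = r8_fiq..r14_fiq
         */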
   9540
   9541static void take_aarch32_exception(CPUARMState *env, int new_mode,
   9542                                   uint32_t mask, uint32_t offset,
   9543                                   uint32_t newpc)
   9544{
   9545    int new_el;
   9546
   9547    /* Change the CPU state so as to actually take the exception. */
   9548    switch_mode(env, new_mode);
   9549
   9550    /*
   9551     * For exceptions taken to AArch32 we must clear the SS bit in both
   9552     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
   9553     */
   9554    env->pstate &= ~PSTATE_SS;
   9555    env->spsr = cpsr_read(env);
   9556    /* Clear IT bits.  */
   9557    env->condexec_bits = 0;
   9558    /* Switch to the new mode, and to the correct instruction set.  */
   9559    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
   9560
   9561    /* This must be after mode switching. */
   9562    new_el = arm_current_el(env);
   9563
   9564    /* Set new mode endianness */
   9565    env->uncached_cpsr &= ~CPSR_E;
   9566    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
   9567        env->uncached_cpsr |= CPSR_E;
   9568    }
   9569    /* J and IL must always be cleared for exception entry */
   9570    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
   9571    env->daif |= mask;
   9572
   9573    if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
   9574        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
   9575            env->uncached_cpsr |= CPSR_SSBS;
   9576        } else {
   9577            env->uncached_cpsr &= ~CPSR_SSBS;
   9578        }
   9579    }
   9580
   9581    if (new_mode == ARM_CPU_MODE_HYP) {
   9582        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
   9583        env->elr_el[2] = env->regs[15];
   9584    } else {
    9585        /* CPSR.PAN is normally preserved unless...  */
   9586        if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
   9587            switch (new_el) {
   9588            case 3:
   9589                if (!arm_is_secure_below_el3(env)) {
   9590                    /* ... the target is EL3, from non-secure state.  */
   9591                    env->uncached_cpsr &= ~CPSR_PAN;
   9592                    break;
   9593                }
   9594                /* ... the target is EL3, from secure state ... */
   9595                /* fall through */
   9596            case 1:
   9597                /* ... the target is EL1 and SCTLR.SPAN is 0.  */
   9598                if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
   9599                    env->uncached_cpsr |= CPSR_PAN;
   9600                }
   9601                break;
   9602            }
   9603        }
   9604        /*
    9605         * This is a lie, as there was no c1_sys on V4T/V5, but who cares;
    9606         * we just guard the Thumb mode selection on V4T and later.
   9607         */
   9608        if (arm_feature(env, ARM_FEATURE_V4T)) {
   9609            env->thumb =
   9610                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
   9611        }
   9612        env->regs[14] = env->regs[15] + offset;
   9613    }
   9614    env->regs[15] = newpc;
   9615    arm_rebuild_hflags(env);
   9616}
   9617
   9618static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
   9619{
   9620    /*
   9621     * Handle exception entry to Hyp mode; this is sufficiently
   9622     * different to entry to other AArch32 modes that we handle it
   9623     * separately here.
   9624     *
   9625     * The vector table entry used is always the 0x14 Hyp mode entry point,
   9626     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
   9627     * The offset applied to the preferred return address is always zero
   9628     * (see DDI0487C.a section G1.12.3).
   9629     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
   9630     */
   9631    uint32_t addr, mask;
   9632    ARMCPU *cpu = ARM_CPU(cs);
   9633    CPUARMState *env = &cpu->env;
   9634
   9635    switch (cs->exception_index) {
   9636    case EXCP_UDEF:
   9637        addr = 0x04;
   9638        break;
   9639    case EXCP_SWI:
   9640        addr = 0x14;
   9641        break;
   9642    case EXCP_BKPT:
   9643        /* Fall through to prefetch abort.  */
   9644    case EXCP_PREFETCH_ABORT:
   9645        env->cp15.ifar_s = env->exception.vaddress;
   9646        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
   9647                      (uint32_t)env->exception.vaddress);
   9648        addr = 0x0c;
   9649        break;
   9650    case EXCP_DATA_ABORT:
   9651        env->cp15.dfar_s = env->exception.vaddress;
   9652        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
   9653                      (uint32_t)env->exception.vaddress);
   9654        addr = 0x10;
   9655        break;
   9656    case EXCP_IRQ:
   9657        addr = 0x18;
   9658        break;
   9659    case EXCP_FIQ:
   9660        addr = 0x1c;
   9661        break;
   9662    case EXCP_HVC:
   9663        addr = 0x08;
   9664        break;
   9665    case EXCP_HYP_TRAP:
   9666        addr = 0x14;
   9667        break;
   9668    default:
   9669        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
   9670    }
   9671
   9672    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
   9673        if (!arm_feature(env, ARM_FEATURE_V8)) {
   9674            /*
   9675             * QEMU syndrome values are v8-style. v7 has the IL bit
    9676             * UNK/SBZP for "field not valid" cases, whereas v8 uses RES1.
   9677             * If this is a v7 CPU, squash the IL bit in those cases.
   9678             */
   9679            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
   9680                (cs->exception_index == EXCP_DATA_ABORT &&
   9681                 !(env->exception.syndrome & ARM_EL_ISV)) ||
   9682                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
   9683                env->exception.syndrome &= ~ARM_EL_IL;
   9684            }
   9685        }
   9686        env->cp15.esr_el[2] = env->exception.syndrome;
   9687    }
   9688
   9689    if (arm_current_el(env) != 2 && addr < 0x14) {
   9690        addr = 0x14;
   9691    }
   9692
   9693    mask = 0;
   9694    if (!(env->cp15.scr_el3 & SCR_EA)) {
   9695        mask |= CPSR_A;
   9696    }
   9697    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
   9698        mask |= CPSR_I;
   9699    }
   9700    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
   9701        mask |= CPSR_F;
   9702    }
   9703
   9704    addr += env->cp15.hvbar;
   9705
   9706    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
   9707}
   9708
   9709static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
   9710{
   9711    ARMCPU *cpu = ARM_CPU(cs);
   9712    CPUARMState *env = &cpu->env;
   9713    uint32_t addr;
   9714    uint32_t mask;
   9715    int new_mode;
   9716    uint32_t offset;
   9717    uint32_t moe;
   9718
   9719    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
   9720    switch (syn_get_ec(env->exception.syndrome)) {
   9721    case EC_BREAKPOINT:
   9722    case EC_BREAKPOINT_SAME_EL:
   9723        moe = 1;
   9724        break;
   9725    case EC_WATCHPOINT:
   9726    case EC_WATCHPOINT_SAME_EL:
   9727        moe = 10;
   9728        break;
   9729    case EC_AA32_BKPT:
   9730        moe = 3;
   9731        break;
   9732    case EC_VECTORCATCH:
   9733        moe = 5;
   9734        break;
   9735    default:
   9736        moe = 0;
   9737        break;
   9738    }
   9739
   9740    if (moe) {
   9741        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
   9742    }
   9743
   9744    if (env->exception.target_el == 2) {
   9745        arm_cpu_do_interrupt_aarch32_hyp(cs);
   9746        return;
   9747    }
   9748
   9749    switch (cs->exception_index) {
   9750    case EXCP_UDEF:
   9751        new_mode = ARM_CPU_MODE_UND;
   9752        addr = 0x04;
   9753        mask = CPSR_I;
   9754        if (env->thumb)
   9755            offset = 2;
   9756        else
   9757            offset = 4;
   9758        break;
   9759    case EXCP_SWI:
   9760        new_mode = ARM_CPU_MODE_SVC;
   9761        addr = 0x08;
   9762        mask = CPSR_I;
   9763        /* The PC already points to the next instruction.  */
   9764        offset = 0;
   9765        break;
   9766    case EXCP_BKPT:
   9767        /* Fall through to prefetch abort.  */
   9768    case EXCP_PREFETCH_ABORT:
   9769        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
   9770        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
   9771        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
   9772                      env->exception.fsr, (uint32_t)env->exception.vaddress);
   9773        new_mode = ARM_CPU_MODE_ABT;
   9774        addr = 0x0c;
   9775        mask = CPSR_A | CPSR_I;
   9776        offset = 4;
   9777        break;
   9778    case EXCP_DATA_ABORT:
   9779        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
   9780        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
   9781        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
   9782                      env->exception.fsr,
   9783                      (uint32_t)env->exception.vaddress);
   9784        new_mode = ARM_CPU_MODE_ABT;
   9785        addr = 0x10;
   9786        mask = CPSR_A | CPSR_I;
   9787        offset = 8;
   9788        break;
   9789    case EXCP_IRQ:
   9790        new_mode = ARM_CPU_MODE_IRQ;
   9791        addr = 0x18;
   9792        /* Disable IRQ and imprecise data aborts.  */
   9793        mask = CPSR_A | CPSR_I;
   9794        offset = 4;
   9795        if (env->cp15.scr_el3 & SCR_IRQ) {
   9796            /* IRQ routed to monitor mode */
   9797            new_mode = ARM_CPU_MODE_MON;
   9798            mask |= CPSR_F;
   9799        }
   9800        break;
   9801    case EXCP_FIQ:
   9802        new_mode = ARM_CPU_MODE_FIQ;
   9803        addr = 0x1c;
   9804        /* Disable FIQ, IRQ and imprecise data aborts.  */
   9805        mask = CPSR_A | CPSR_I | CPSR_F;
   9806        if (env->cp15.scr_el3 & SCR_FIQ) {
   9807            /* FIQ routed to monitor mode */
   9808            new_mode = ARM_CPU_MODE_MON;
   9809        }
   9810        offset = 4;
   9811        break;
   9812    case EXCP_VIRQ:
   9813        new_mode = ARM_CPU_MODE_IRQ;
   9814        addr = 0x18;
   9815        /* Disable IRQ and imprecise data aborts.  */
   9816        mask = CPSR_A | CPSR_I;
   9817        offset = 4;
   9818        break;
   9819    case EXCP_VFIQ:
   9820        new_mode = ARM_CPU_MODE_FIQ;
   9821        addr = 0x1c;
   9822        /* Disable FIQ, IRQ and imprecise data aborts.  */
   9823        mask = CPSR_A | CPSR_I | CPSR_F;
   9824        offset = 4;
   9825        break;
   9826    case EXCP_SMC:
   9827        new_mode = ARM_CPU_MODE_MON;
   9828        addr = 0x08;
   9829        mask = CPSR_A | CPSR_I | CPSR_F;
   9830        offset = 0;
   9831        break;
   9832    default:
   9833        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
   9834        return; /* Never happens.  Keep compiler happy.  */
   9835    }
   9836
   9837    if (new_mode == ARM_CPU_MODE_MON) {
   9838        addr += env->cp15.mvbar;
   9839    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
   9840        /* High vectors. When enabled, base address cannot be remapped. */
   9841        addr += 0xffff0000;
   9842    } else {
   9843        /* ARM v7 architectures provide a vector base address register to remap
   9844         * the interrupt vector table.
   9845         * This register is only followed in non-monitor mode, and is banked.
    9846         * This register is honoured only in non-monitor modes, and is banked.
   9847         */
   9848        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
   9849    }
   9850
   9851    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
   9852        env->cp15.scr_el3 &= ~SCR_NS;
   9853    }
   9854
   9855    take_aarch32_exception(env, new_mode, mask, offset, addr);
   9856}
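
        /*
         * For example: an UNDEF taken from AArch32 (and not routed to Hyp or
         * Monitor) enters UND mode at the banked VBAR + 0x04 (or at
         * 0xffff0004 if SCTLR.V is set), with LR_und set to the faulting
         * PC + 4 (or + 2 in Thumb state) and IRQs masked, exactly as
         * computed above.
         */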
   9857
   9858static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
   9859{
   9860    /*
   9861     * Return the register number of the AArch64 view of the AArch32
   9862     * register @aarch32_reg. The CPUARMState CPSR is assumed to still
   9863     * be that of the AArch32 mode the exception came from.
   9864     */
   9865    int mode = env->uncached_cpsr & CPSR_M;
   9866
   9867    switch (aarch32_reg) {
   9868    case 0 ... 7:
   9869        return aarch32_reg;
   9870    case 8 ... 12:
   9871        return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
   9872    case 13:
   9873        switch (mode) {
   9874        case ARM_CPU_MODE_USR:
   9875        case ARM_CPU_MODE_SYS:
   9876            return 13;
   9877        case ARM_CPU_MODE_HYP:
   9878            return 15;
   9879        case ARM_CPU_MODE_IRQ:
   9880            return 17;
   9881        case ARM_CPU_MODE_SVC:
   9882            return 19;
   9883        case ARM_CPU_MODE_ABT:
   9884            return 21;
   9885        case ARM_CPU_MODE_UND:
   9886            return 23;
   9887        case ARM_CPU_MODE_FIQ:
   9888            return 29;
   9889        default:
   9890            g_assert_not_reached();
   9891        }
   9892    case 14:
   9893        switch (mode) {
   9894        case ARM_CPU_MODE_USR:
   9895        case ARM_CPU_MODE_SYS:
   9896        case ARM_CPU_MODE_HYP:
   9897            return 14;
   9898        case ARM_CPU_MODE_IRQ:
   9899            return 16;
   9900        case ARM_CPU_MODE_SVC:
   9901            return 18;
   9902        case ARM_CPU_MODE_ABT:
   9903            return 20;
   9904        case ARM_CPU_MODE_UND:
   9905            return 22;
   9906        case ARM_CPU_MODE_FIQ:
   9907            return 30;
   9908        default:
   9909            g_assert_not_reached();
   9910        }
   9911    case 15:
   9912        return 31;
   9913    default:
   9914        g_assert_not_reached();
   9915    }
   9916}
   9917
   9918static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
   9919{
   9920    uint32_t ret = cpsr_read(env);
   9921
   9922    /* Move DIT to the correct location for SPSR_ELx */
   9923    if (ret & CPSR_DIT) {
   9924        ret &= ~CPSR_DIT;
   9925        ret |= PSTATE_DIT;
   9926    }
   9927    /* Merge PSTATE.SS into SPSR_ELx */
   9928    ret |= env->pstate & PSTATE_SS;
   9929
   9930    return ret;
   9931}
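
        /*
         * (DIT sits at bit 21 in the AArch32 CPSR but at bit 24 in an
         * SPSR_ELx image of AArch32 state, hence the relocation above.)
         */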
   9932
   9933/* Handle exception entry to a target EL which is using AArch64 */
   9934static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
   9935{
   9936    ARMCPU *cpu = ARM_CPU(cs);
   9937    CPUARMState *env = &cpu->env;
   9938    unsigned int new_el = env->exception.target_el;
   9939    target_ulong addr = env->cp15.vbar_el[new_el];
   9940    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
   9941    unsigned int old_mode;
   9942    unsigned int cur_el = arm_current_el(env);
   9943    int rt;
   9944
   9945    /*
   9946     * Note that new_el can never be 0.  If cur_el is 0, then
   9947     * el0_a64 is is_a64(), else el0_a64 is ignored.
   9948     */
   9949    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
   9950
   9951    if (cur_el < new_el) {
   9952        /* Entry vector offset depends on whether the implemented EL
   9953         * immediately lower than the target level is using AArch32 or AArch64
   9954         */
   9955        bool is_aa64;
   9956        uint64_t hcr;
   9957
   9958        switch (new_el) {
   9959        case 3:
   9960            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
   9961            break;
   9962        case 2:
   9963            hcr = arm_hcr_el2_eff(env);
   9964            if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
   9965                is_aa64 = (hcr & HCR_RW) != 0;
   9966                break;
   9967            }
   9968            /* fall through */
   9969        case 1:
   9970            is_aa64 = is_a64(env);
   9971            break;
   9972        default:
   9973            g_assert_not_reached();
   9974        }
   9975
   9976        if (is_aa64) {
   9977            addr += 0x400;
   9978        } else {
   9979            addr += 0x600;
   9980        }
   9981    } else if (pstate_read(env) & PSTATE_SP) {
   9982        addr += 0x200;
   9983    }
   9984
   9985    switch (cs->exception_index) {
   9986    case EXCP_PREFETCH_ABORT:
   9987    case EXCP_DATA_ABORT:
   9988        env->cp15.far_el[new_el] = env->exception.vaddress;
   9989        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
   9990                      env->cp15.far_el[new_el]);
   9991        /* fall through */
   9992    case EXCP_BKPT:
   9993    case EXCP_UDEF:
   9994    case EXCP_SWI:
   9995    case EXCP_HVC:
   9996    case EXCP_HYP_TRAP:
   9997    case EXCP_SMC:
   9998        switch (syn_get_ec(env->exception.syndrome)) {
   9999        case EC_ADVSIMDFPACCESSTRAP:
  10000            /*
  10001             * QEMU internal FP/SIMD syndromes from AArch32 include the
  10002             * TA and coproc fields which are only exposed if the exception
  10003             * is taken to AArch32 Hyp mode. Mask them out to get a valid
  10004             * AArch64 format syndrome.
  10005             */
  10006            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
  10007            break;
  10008        case EC_CP14RTTRAP:
  10009        case EC_CP15RTTRAP:
  10010        case EC_CP14DTTRAP:
  10011            /*
  10012             * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
  10013             * the raw register field from the insn; when taking this to
  10014             * AArch64 we must convert it to the AArch64 view of the register
  10015             * number. Notice that we read a 4-bit AArch32 register number and
  10016             * write back a 5-bit AArch64 one.
  10017             */
  10018            rt = extract32(env->exception.syndrome, 5, 4);
  10019            rt = aarch64_regnum(env, rt);
  10020            env->exception.syndrome = deposit32(env->exception.syndrome,
  10021                                                5, 5, rt);
  10022            break;
  10023        case EC_CP15RRTTRAP:
  10024        case EC_CP14RRTTRAP:
  10025            /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
  10026            rt = extract32(env->exception.syndrome, 5, 4);
  10027            rt = aarch64_regnum(env, rt);
  10028            env->exception.syndrome = deposit32(env->exception.syndrome,
  10029                                                5, 5, rt);
  10030            rt = extract32(env->exception.syndrome, 10, 4);
  10031            rt = aarch64_regnum(env, rt);
  10032            env->exception.syndrome = deposit32(env->exception.syndrome,
  10033                                                10, 5, rt);
  10034            break;
  10035        }
  10036        env->cp15.esr_el[new_el] = env->exception.syndrome;
  10037        break;
  10038    case EXCP_IRQ:
  10039    case EXCP_VIRQ:
  10040        addr += 0x80;
  10041        break;
  10042    case EXCP_FIQ:
  10043    case EXCP_VFIQ:
  10044        addr += 0x100;
  10045        break;
  10046    default:
  10047        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
  10048    }
  10049
  10050    if (is_a64(env)) {
  10051        old_mode = pstate_read(env);
  10052        aarch64_save_sp(env, arm_current_el(env));
  10053        env->elr_el[new_el] = env->pc;
  10054    } else {
  10055        old_mode = cpsr_read_for_spsr_elx(env);
  10056        env->elr_el[new_el] = env->regs[15];
  10057
  10058        aarch64_sync_32_to_64(env);
  10059
  10060        env->condexec_bits = 0;
  10061    }
  10062    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
  10063
  10064    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
  10065                  env->elr_el[new_el]);
  10066
  10067    if (cpu_isar_feature(aa64_pan, cpu)) {
  10068        /* The value of PSTATE.PAN is normally preserved, except when ... */
  10069        new_mode |= old_mode & PSTATE_PAN;
  10070        switch (new_el) {
  10071        case 2:
  10072            /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
  10073            if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
  10074                != (HCR_E2H | HCR_TGE)) {
  10075                break;
  10076            }
  10077            /* fall through */
  10078        case 1:
  10079            /* ... the target is EL1 ... */
  10080            /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
  10081            if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
  10082                new_mode |= PSTATE_PAN;
  10083            }
  10084            break;
  10085        }
  10086    }
  10087    if (cpu_isar_feature(aa64_mte, cpu)) {
  10088        new_mode |= PSTATE_TCO;
  10089    }
  10090
  10091    if (cpu_isar_feature(aa64_ssbs, cpu)) {
  10092        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
  10093            new_mode |= PSTATE_SSBS;
  10094        } else {
  10095            new_mode &= ~PSTATE_SSBS;
  10096        }
  10097    }
  10098
  10099    pstate_write(env, PSTATE_DAIF | new_mode);
  10100    env->aarch64 = 1;
  10101    aarch64_restore_sp(env, new_el);
  10102    helper_rebuild_hflags_a64(env, new_el);
  10103
  10104    env->pc = addr;
  10105
  10106    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
  10107                  new_el, env->pc, pstate_read(env));
  10108}
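
        /*
         * Example of the vector selection above: an IRQ taken from AArch64
         * EL0 to EL1 vectors to VBAR_EL1 + 0x400 (lower EL using AArch64)
         * + 0x80 (IRQ), i.e. VBAR_EL1 + 0x480; the same IRQ taken from EL1
         * itself with SPSel = 1 vectors to VBAR_EL1 + 0x280.
         */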
  10109
  10110/*
  10111 * Do semihosting call and set the appropriate return value. All the
  10112 * permission and validity checks have been done at translate time.
  10113 *
   10114 * We only see semihosting exceptions under TCG, as they are not
  10115 * trapped to the hypervisor in KVM.
  10116 */
  10117#ifdef CONFIG_TCG
  10118static void handle_semihosting(CPUState *cs)
  10119{
  10120    ARMCPU *cpu = ARM_CPU(cs);
  10121    CPUARMState *env = &cpu->env;
  10122
  10123    if (is_a64(env)) {
  10124        qemu_log_mask(CPU_LOG_INT,
  10125                      "...handling as semihosting call 0x%" PRIx64 "\n",
  10126                      env->xregs[0]);
  10127        env->xregs[0] = do_common_semihosting(cs);
  10128        env->pc += 4;
  10129    } else {
  10130        qemu_log_mask(CPU_LOG_INT,
  10131                      "...handling as semihosting call 0x%x\n",
  10132                      env->regs[0]);
  10133        env->regs[0] = do_common_semihosting(cs);
  10134        env->regs[15] += env->thumb ? 2 : 4;
  10135    }
  10136}
  10137#endif
  10138
  10139/* Handle a CPU exception for A and R profile CPUs.
  10140 * Do any appropriate logging, handle PSCI calls, and then hand off
  10141 * to the AArch64-entry or AArch32-entry function depending on the
  10142 * target exception level's register width.
  10143 *
   10144 * Note: this is used both by TCG (as the do_interrupt tcg op) and
   10145 *       by KVM, to re-inject guest debug exceptions and to inject
   10146 *       a Synchronous External Abort.
  10147 */
  10148void arm_cpu_do_interrupt(CPUState *cs)
  10149{
  10150    ARMCPU *cpu = ARM_CPU(cs);
  10151    CPUARMState *env = &cpu->env;
  10152    unsigned int new_el = env->exception.target_el;
  10153
  10154    assert(!arm_feature(env, ARM_FEATURE_M));
  10155
  10156    arm_log_exception(cs->exception_index);
  10157    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
  10158                  new_el);
  10159    if (qemu_loglevel_mask(CPU_LOG_INT)
  10160        && !excp_is_internal(cs->exception_index)) {
  10161        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
  10162                      syn_get_ec(env->exception.syndrome),
  10163                      env->exception.syndrome);
  10164    }
  10165
  10166    if (arm_is_psci_call(cpu, cs->exception_index)) {
  10167        arm_handle_psci_call(cpu);
  10168        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
  10169        return;
  10170    }
  10171
  10172    /*
  10173     * Semihosting semantics depend on the register width of the code
  10174     * that caused the exception, not the target exception level, so
  10175     * must be handled here.
  10176     */
  10177#ifdef CONFIG_TCG
  10178    if (cs->exception_index == EXCP_SEMIHOST) {
  10179        handle_semihosting(cs);
  10180        return;
  10181    }
  10182#endif
  10183
   10184    /* Hooks may change global state, so the BQL must be held; it is
   10185     * also required for any modification of
   10186     * cs->interrupt_request.
   10187     */
  10188    g_assert(qemu_mutex_iothread_locked());
  10189
  10190    arm_call_pre_el_change_hook(cpu);
  10191
  10192    assert(!excp_is_internal(cs->exception_index));
  10193    if (arm_el_is_aa64(env, new_el)) {
  10194        arm_cpu_do_interrupt_aarch64(cs);
  10195    } else {
  10196        arm_cpu_do_interrupt_aarch32(cs);
  10197    }
  10198
  10199    arm_call_el_change_hook(cpu);
  10200
  10201    if (!kvm_enabled()) {
  10202        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
  10203    }
  10204}
  10205#endif /* !CONFIG_USER_ONLY */
  10206
  10207uint64_t arm_sctlr(CPUARMState *env, int el)
  10208{
  10209    /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
  10210    if (el == 0) {
  10211        ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
  10212        el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
  10213             ? 2 : 1;
  10214    }
  10215    return env->cp15.sctlr_el[el];
  10216}
  10217
  10218/* Return the SCTLR value which controls this address translation regime */
  10219static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
  10220{
  10221    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
  10222}
  10223
  10224#ifndef CONFIG_USER_ONLY
  10225
  10226/* Return true if the specified stage of address translation is disabled */
  10227static inline bool regime_translation_disabled(CPUARMState *env,
  10228                                               ARMMMUIdx mmu_idx)
  10229{
  10230    uint64_t hcr_el2;
  10231
  10232    if (arm_feature(env, ARM_FEATURE_M)) {
  10233        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
  10234                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
  10235        case R_V7M_MPU_CTRL_ENABLE_MASK:
  10236            /* Enabled, but not for HardFault and NMI */
  10237            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
  10238        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
  10239            /* Enabled for all cases */
  10240            return false;
  10241        case 0:
  10242        default:
  10243            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
  10244             * we warned about that in armv7m_nvic.c when the guest set it.
  10245             */
  10246            return true;
  10247        }
  10248    }
  10249
  10250    hcr_el2 = arm_hcr_el2_eff(env);
  10251
  10252    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
  10253        /* HCR.DC means HCR.VM behaves as 1 */
  10254        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
  10255    }
  10256
  10257    if (hcr_el2 & HCR_TGE) {
  10258        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
  10259        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
  10260            return true;
  10261        }
  10262    }
  10263
  10264    if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
  10265        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
  10266        return true;
  10267    }
  10268
  10269    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
  10270}
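/*
 * Illustrative consequence (editorial sketch, not part of the original
 * code): with HCR_EL2.TGE set and HCR_EL2.VM clear, a non-secure EL1&0
 * regime reports stage 1 as disabled here (SCTLR_EL1.M behaves as 0),
 * while ARMMMUIdx_Stage2 reports disabled unless VM or DC is set, since
 * HCR.DC makes HCR.VM behave as if it were 1.
 */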
  10271
  10272static inline bool regime_translation_big_endian(CPUARMState *env,
  10273                                                 ARMMMUIdx mmu_idx)
  10274{
  10275    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
  10276}
  10277
  10278/* Return the TTBR associated with this translation regime */
  10279static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
  10280                                   int ttbrn)
  10281{
  10282    if (mmu_idx == ARMMMUIdx_Stage2) {
  10283        return env->cp15.vttbr_el2;
  10284    }
  10285    if (mmu_idx == ARMMMUIdx_Stage2_S) {
  10286        return env->cp15.vsttbr_el2;
  10287    }
  10288    if (ttbrn == 0) {
  10289        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
  10290    } else {
  10291        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
  10292    }
  10293}
  10294
  10295#endif /* !CONFIG_USER_ONLY */
  10296
  10297/* Convert a possible stage1+2 MMU index into the appropriate
  10298 * stage 1 MMU index
  10299 */
  10300static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
  10301{
  10302    switch (mmu_idx) {
  10303    case ARMMMUIdx_SE10_0:
  10304        return ARMMMUIdx_Stage1_SE0;
  10305    case ARMMMUIdx_SE10_1:
  10306        return ARMMMUIdx_Stage1_SE1;
  10307    case ARMMMUIdx_SE10_1_PAN:
  10308        return ARMMMUIdx_Stage1_SE1_PAN;
  10309    case ARMMMUIdx_E10_0:
  10310        return ARMMMUIdx_Stage1_E0;
  10311    case ARMMMUIdx_E10_1:
  10312        return ARMMMUIdx_Stage1_E1;
  10313    case ARMMMUIdx_E10_1_PAN:
  10314        return ARMMMUIdx_Stage1_E1_PAN;
  10315    default:
  10316        return mmu_idx;
  10317    }
  10318}
  10319
  10320/* Return true if the translation regime is using LPAE format page tables */
  10321static inline bool regime_using_lpae_format(CPUARMState *env,
  10322                                            ARMMMUIdx mmu_idx)
  10323{
  10324    int el = regime_el(env, mmu_idx);
  10325    if (el == 2 || arm_el_is_aa64(env, el)) {
  10326        return true;
  10327    }
  10328    if (arm_feature(env, ARM_FEATURE_LPAE)
  10329        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
  10330        return true;
  10331    }
  10332    return false;
  10333}
  10334
  10335/* Returns true if the stage 1 translation regime is using LPAE format page
  10336 * tables. Used when raising alignment exceptions, whose FSR changes depending
  10337 * on whether the long or short descriptor format is in use. */
  10338bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
  10339{
  10340    mmu_idx = stage_1_mmu_idx(mmu_idx);
  10341
  10342    return regime_using_lpae_format(env, mmu_idx);
  10343}
  10344
  10345#ifndef CONFIG_USER_ONLY
  10346static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
  10347{
  10348    switch (mmu_idx) {
  10349    case ARMMMUIdx_SE10_0:
  10350    case ARMMMUIdx_E20_0:
  10351    case ARMMMUIdx_SE20_0:
  10352    case ARMMMUIdx_Stage1_E0:
  10353    case ARMMMUIdx_Stage1_SE0:
  10354    case ARMMMUIdx_MUser:
  10355    case ARMMMUIdx_MSUser:
  10356    case ARMMMUIdx_MUserNegPri:
  10357    case ARMMMUIdx_MSUserNegPri:
  10358        return true;
  10359    default:
  10360        return false;
  10361    case ARMMMUIdx_E10_0:
  10362    case ARMMMUIdx_E10_1:
  10363    case ARMMMUIdx_E10_1_PAN:
  10364        g_assert_not_reached();
  10365    }
  10366}
  10367
  10368/* Translate section/page access permissions to page
  10369 * R/W protection flags
  10370 *
  10371 * @env:         CPUARMState
  10372 * @mmu_idx:     MMU index indicating required translation regime
  10373 * @ap:          The 3-bit access permissions (AP[2:0])
  10374 * @domain_prot: The 2-bit domain access permissions
  10375 */
  10376static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
  10377                                int ap, int domain_prot)
  10378{
  10379    bool is_user = regime_is_user(env, mmu_idx);
  10380
  10381    if (domain_prot == 3) {
  10382        return PAGE_READ | PAGE_WRITE;
  10383    }
  10384
  10385    switch (ap) {
  10386    case 0:
  10387        if (arm_feature(env, ARM_FEATURE_V7)) {
  10388            return 0;
  10389        }
  10390        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
  10391        case SCTLR_S:
  10392            return is_user ? 0 : PAGE_READ;
  10393        case SCTLR_R:
  10394            return PAGE_READ;
  10395        default:
  10396            return 0;
  10397        }
  10398    case 1:
  10399        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
  10400    case 2:
  10401        if (is_user) {
  10402            return PAGE_READ;
  10403        } else {
  10404            return PAGE_READ | PAGE_WRITE;
  10405        }
  10406    case 3:
  10407        return PAGE_READ | PAGE_WRITE;
  10408    case 4: /* Reserved.  */
  10409        return 0;
  10410    case 5:
  10411        return is_user ? 0 : PAGE_READ;
  10412    case 6:
  10413        return PAGE_READ;
  10414    case 7:
  10415        if (!arm_feature(env, ARM_FEATURE_V6K)) {
  10416            return 0;
  10417        }
  10418        return PAGE_READ;
  10419    default:
  10420        g_assert_not_reached();
  10421    }
  10422}
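/*
 * Worked example (editorial sketch, values hypothetical): on a v7 CPU,
 * a short descriptor with AP == 2 in a "client" domain (domain_prot == 1)
 * decodes as
 *   ap_to_rw_prot(env, mmu_idx, 2, 1) => PAGE_READ | PAGE_WRITE  (privileged)
 *   ap_to_rw_prot(env, mmu_idx, 2, 1) => PAGE_READ               (user regime)
 * whereas a "manager" domain (domain_prot == 3) grants PAGE_READ | PAGE_WRITE
 * before the AP bits are even looked at.
 */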
  10423
  10424/* Translate section/page access permissions to page
  10425 * R/W protection flags.
  10426 *
  10427 * @ap:      The 2-bit simple AP (AP[2:1])
  10428 * @is_user: TRUE if accessing from PL0
  10429 */
  10430static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
  10431{
  10432    switch (ap) {
  10433    case 0:
  10434        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
  10435    case 1:
  10436        return PAGE_READ | PAGE_WRITE;
  10437    case 2:
  10438        return is_user ? 0 : PAGE_READ;
  10439    case 3:
  10440        return PAGE_READ;
  10441    default:
  10442        g_assert_not_reached();
  10443    }
  10444}
  10445
  10446static inline int
  10447simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
  10448{
  10449    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
  10450}
  10451
  10452/* Translate S2 section/page access permissions to protection flags
  10453 *
  10454 * @env:     CPUARMState
  10455 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
  10456 * @xn:      XN (execute-never) bits
  10457 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
  10458 */
  10459static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
  10460{
  10461    int prot = 0;
  10462
  10463    if (s2ap & 1) {
  10464        prot |= PAGE_READ;
  10465    }
  10466    if (s2ap & 2) {
  10467        prot |= PAGE_WRITE;
  10468    }
  10469
  10470    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
  10471        switch (xn) {
  10472        case 0:
  10473            prot |= PAGE_EXEC;
  10474            break;
  10475        case 1:
  10476            if (s1_is_el0) {
  10477                prot |= PAGE_EXEC;
  10478            }
  10479            break;
  10480        case 2:
  10481            break;
  10482        case 3:
  10483            if (!s1_is_el0) {
  10484                prot |= PAGE_EXEC;
  10485            }
  10486            break;
  10487        default:
  10488            g_assert_not_reached();
  10489        }
  10490    } else {
  10491        if (!extract32(xn, 1, 1)) {
  10492            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
  10493                prot |= PAGE_EXEC;
  10494            }
  10495        }
  10496    }
  10497    return prot;
  10498}
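/*
 * Worked example (editorial sketch): with FEAT_TTS2UXN present, a stage 2
 * descriptor with S2AP == 3 and XN == 1 yields PAGE_READ | PAGE_WRITE for
 * an EL1 access (s1_is_el0 == false) but additionally PAGE_EXEC for an
 * EL0 access, since the XN == 1 encoding permits execution only at EL0.
 */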
  10499
  10500/* Translate section/page access permissions to protection flags
  10501 *
  10502 * @env:     CPUARMState
  10503 * @mmu_idx: MMU index indicating required translation regime
  10504 * @is_aa64: TRUE if AArch64
  10505 * @ap:      The 2-bit simple AP (AP[2:1])
  10506 * @ns:      NS (non-secure) bit
  10507 * @xn:      XN (execute-never) bit
  10508 * @pxn:     PXN (privileged execute-never) bit
  10509 */
  10510static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
  10511                      int ap, int ns, int xn, int pxn)
  10512{
  10513    bool is_user = regime_is_user(env, mmu_idx);
  10514    int prot_rw, user_rw;
  10515    bool have_wxn;
  10516    int wxn = 0;
  10517
  10518    assert(mmu_idx != ARMMMUIdx_Stage2);
  10519    assert(mmu_idx != ARMMMUIdx_Stage2_S);
  10520
  10521    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
  10522    if (is_user) {
  10523        prot_rw = user_rw;
  10524    } else {
  10525        if (user_rw && regime_is_pan(env, mmu_idx)) {
  10526            /* PAN forbids data accesses but doesn't affect insn fetch */
  10527            prot_rw = 0;
  10528        } else {
  10529            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
  10530        }
  10531    }
  10532
  10533    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
  10534        return prot_rw;
  10535    }
  10536
  10537    /* TODO have_wxn should be replaced with
  10538     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
  10539     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
  10540     * compatible processors have EL2, which is required for [U]WXN.
  10541     */
  10542    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
  10543
  10544    if (have_wxn) {
  10545        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
  10546    }
  10547
  10548    if (is_aa64) {
  10549        if (regime_has_2_ranges(mmu_idx) && !is_user) {
  10550            xn = pxn || (user_rw & PAGE_WRITE);
  10551        }
  10552    } else if (arm_feature(env, ARM_FEATURE_V7)) {
  10553        switch (regime_el(env, mmu_idx)) {
  10554        case 1:
  10555        case 3:
  10556            if (is_user) {
  10557                xn = xn || !(user_rw & PAGE_READ);
  10558            } else {
  10559                int uwxn = 0;
  10560                if (have_wxn) {
  10561                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
  10562                }
  10563                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
  10564                     (uwxn && (user_rw & PAGE_WRITE));
  10565            }
  10566            break;
  10567        case 2:
  10568            break;
  10569        }
  10570    } else {
  10571        xn = wxn = 0;
  10572    }
  10573
  10574    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
  10575        return prot_rw;
  10576    }
  10577    return prot_rw | PAGE_EXEC;
  10578}
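/*
 * Worked example (editorial sketch): for an AArch64 privileged access in a
 * two-range regime with AP[2:1] == 1 (read/write at any EL), user_rw
 * includes PAGE_WRITE, so xn is forced on above and the result omits
 * PAGE_EXEC: a user-writable page is never privileged-executable here,
 * independent of SCTLR.WXN.
 */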
  10579
  10580static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
  10581                                     uint32_t *table, uint32_t address)
  10582{
  10583    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
  10584    TCR *tcr = regime_tcr(env, mmu_idx);
  10585
  10586    if (address & tcr->mask) {
  10587        if (tcr->raw_tcr & TTBCR_PD1) {
  10588            /* Translation table walk disabled for TTBR1 */
  10589            return false;
  10590        }
  10591        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
  10592    } else {
  10593        if (tcr->raw_tcr & TTBCR_PD0) {
  10594            /* Translation table walk disabled for TTBR0 */
  10595            return false;
  10596        }
  10597        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
  10598    }
  10599    *table |= (address >> 18) & 0x3ffc;
  10600    return true;
  10601}
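/*
 * Worked example (editorial sketch, values hypothetical): with TTBCR.N == 0
 * (so tcr->mask is 0 and TTBR0 is always used, base_mask == 0xffffc000),
 * TTBR0 == 0x80004000 and address == 0x12345678:
 *   *table = (0x80004000 & 0xffffc000) | ((0x12345678 >> 18) & 0x3ffc)
 *          = 0x80004000 | (0x123 << 2) = 0x8000448c
 * i.e. the ">> 18 ... & 0x3ffc" shift-and-mask selects the one-word L1
 * entry indexed by VA[31:20].
 */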
  10602
  10603/* Translate a S1 pagetable walk through S2 if needed.  */
  10604static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
  10605                               hwaddr addr, bool *is_secure,
  10606                               ARMMMUFaultInfo *fi)
  10607{
  10608    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
  10609        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
  10610        target_ulong s2size;
  10611        hwaddr s2pa;
  10612        int s2prot;
  10613        int ret;
  10614        ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
  10615                                          : ARMMMUIdx_Stage2;
  10616        ARMCacheAttrs cacheattrs = {};
  10617        MemTxAttrs txattrs = {};
  10618
  10619        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
  10620                                 &s2pa, &txattrs, &s2prot, &s2size, fi,
  10621                                 &cacheattrs);
  10622        if (ret) {
  10623            assert(fi->type != ARMFault_None);
  10624            fi->s2addr = addr;
  10625            fi->stage2 = true;
  10626            fi->s1ptw = true;
  10627            fi->s1ns = !*is_secure;
  10628            return ~0;
  10629        }
  10630        if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
  10631            (cacheattrs.attrs & 0xf0) == 0) {
  10632            /*
  10633             * PTW set and S1 walk touched S2 Device memory:
  10634             * generate Permission fault.
  10635             */
  10636            fi->type = ARMFault_Permission;
  10637            fi->s2addr = addr;
  10638            fi->stage2 = true;
  10639            fi->s1ptw = true;
  10640            fi->s1ns = !*is_secure;
  10641            return ~0;
  10642        }
  10643
  10644        if (arm_is_secure_below_el3(env)) {
  10645            /* Check if page table walk is to secure or non-secure PA space. */
  10646            if (*is_secure) {
  10647                *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
  10648            } else {
  10649                *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
  10650            }
  10651        } else {
  10652            assert(!*is_secure);
  10653        }
  10654
  10655        addr = s2pa;
  10656    }
  10657    return addr;
  10658}
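/*
 * Illustrative consequence (editorial sketch): if HCR_EL2.PTW is set and
 * the stage 1 descriptor fetch resolves through stage 2 to an address
 * whose converted attributes have the high nibble clear (i.e. any Device
 * memory type), the walk itself takes a stage 2 Permission fault instead
 * of performing the descriptor load.
 */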
  10659
  10660/* All loads done in the course of a page table walk go through here. */
  10661static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
  10662                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
  10663{
  10664    ARMCPU *cpu = ARM_CPU(cs);
  10665    CPUARMState *env = &cpu->env;
  10666    MemTxAttrs attrs = {};
  10667    MemTxResult result = MEMTX_OK;
  10668    AddressSpace *as;
  10669    uint32_t data;
  10670
  10671    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
  10672    attrs.secure = is_secure;
  10673    as = arm_addressspace(cs, attrs);
  10674    if (fi->s1ptw) {
  10675        return 0;
  10676    }
  10677    if (regime_translation_big_endian(env, mmu_idx)) {
  10678        data = address_space_ldl_be(as, addr, attrs, &result);
  10679    } else {
  10680        data = address_space_ldl_le(as, addr, attrs, &result);
  10681    }
  10682    if (result == MEMTX_OK) {
  10683        return data;
  10684    }
  10685    fi->type = ARMFault_SyncExternalOnWalk;
  10686    fi->ea = arm_extabort_type(result);
  10687    return 0;
  10688}
  10689
  10690static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
  10691                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
  10692{
  10693    ARMCPU *cpu = ARM_CPU(cs);
  10694    CPUARMState *env = &cpu->env;
  10695    MemTxAttrs attrs = {};
  10696    MemTxResult result = MEMTX_OK;
  10697    AddressSpace *as;
  10698    uint64_t data;
  10699
  10700    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
  10701    attrs.secure = is_secure;
  10702    as = arm_addressspace(cs, attrs);
  10703    if (fi->s1ptw) {
  10704        return 0;
  10705    }
  10706    if (regime_translation_big_endian(env, mmu_idx)) {
  10707        data = address_space_ldq_be(as, addr, attrs, &result);
  10708    } else {
  10709        data = address_space_ldq_le(as, addr, attrs, &result);
  10710    }
  10711    if (result == MEMTX_OK) {
  10712        return data;
  10713    }
  10714    fi->type = ARMFault_SyncExternalOnWalk;
  10715    fi->ea = arm_extabort_type(result);
  10716    return 0;
  10717}
  10718
  10719static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
  10720                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
  10721                             hwaddr *phys_ptr, int *prot,
  10722                             target_ulong *page_size,
  10723                             ARMMMUFaultInfo *fi)
  10724{
  10725    CPUState *cs = env_cpu(env);
  10726    int level = 1;
  10727    uint32_t table;
  10728    uint32_t desc;
  10729    int type;
  10730    int ap;
  10731    int domain = 0;
  10732    int domain_prot;
  10733    hwaddr phys_addr;
  10734    uint32_t dacr;
  10735
  10736    /* Pagetable walk.  */
  10737    /* Lookup l1 descriptor.  */
  10738    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
  10739        /* Section translation fault if page walk is disabled by PD0 or PD1 */
  10740        fi->type = ARMFault_Translation;
  10741        goto do_fault;
  10742    }
  10743    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
  10744                       mmu_idx, fi);
  10745    if (fi->type != ARMFault_None) {
  10746        goto do_fault;
  10747    }
  10748    type = (desc & 3);
  10749    domain = (desc >> 5) & 0x0f;
  10750    if (regime_el(env, mmu_idx) == 1) {
  10751        dacr = env->cp15.dacr_ns;
  10752    } else {
  10753        dacr = env->cp15.dacr_s;
  10754    }
  10755    domain_prot = (dacr >> (domain * 2)) & 3;
  10756    if (type == 0) {
  10757        /* Section translation fault.  */
  10758        fi->type = ARMFault_Translation;
  10759        goto do_fault;
  10760    }
  10761    if (type != 2) {
  10762        level = 2;
  10763    }
  10764    if (domain_prot == 0 || domain_prot == 2) {
  10765        fi->type = ARMFault_Domain;
  10766        goto do_fault;
  10767    }
  10768    if (type == 2) {
  10769        /* 1MB section.  */
  10770        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
  10771        ap = (desc >> 10) & 3;
  10772        *page_size = 1024 * 1024;
  10773    } else {
  10774        /* Lookup l2 entry.  */
  10775        if (type == 1) {
  10776            /* Coarse pagetable.  */
  10777            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
  10778        } else {
  10779            /* Fine pagetable.  */
  10780            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
  10781        }
  10782        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
  10783                           mmu_idx, fi);
  10784        if (fi->type != ARMFault_None) {
  10785            goto do_fault;
  10786        }
  10787        switch (desc & 3) {
  10788        case 0: /* Page translation fault.  */
  10789            fi->type = ARMFault_Translation;
  10790            goto do_fault;
  10791        case 1: /* 64k page.  */
  10792            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
  10793            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
  10794            *page_size = 0x10000;
  10795            break;
  10796        case 2: /* 4k page.  */
  10797            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
  10798            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
  10799            *page_size = 0x1000;
  10800            break;
  10801        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
  10802            if (type == 1) {
  10803                /* ARMv6/XScale extended small page format */
  10804                if (arm_feature(env, ARM_FEATURE_XSCALE)
  10805                    || arm_feature(env, ARM_FEATURE_V6)) {
  10806                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
  10807                    *page_size = 0x1000;
  10808                } else {
  10809                    /* UNPREDICTABLE in ARMv5; we choose to take a
  10810                     * page translation fault.
  10811                     */
  10812                    fi->type = ARMFault_Translation;
  10813                    goto do_fault;
  10814                }
  10815            } else {
  10816                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
  10817                *page_size = 0x400;
  10818            }
  10819            ap = (desc >> 4) & 3;
  10820            break;
  10821        default:
  10822            /* Never happens, but compiler isn't smart enough to tell.  */
  10823            abort();
  10824        }
  10825    }
  10826    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
  10827    *prot |= *prot ? PAGE_EXEC : 0;
  10828    if (!(*prot & (1 << access_type))) {
  10829        /* Access permission fault.  */
  10830        fi->type = ARMFault_Permission;
  10831        goto do_fault;
  10832    }
  10833    *phys_ptr = phys_addr;
  10834    return false;
  10835do_fault:
  10836    fi->domain = domain;
  10837    fi->level = level;
  10838    return true;
  10839}
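/*
 * Worked example (editorial sketch, all values hypothetical): translating
 * VA 0x00012abc through a coarse second-level table, the L2 entry address is
 *   (l1_desc & 0xfffffc00) | ((0x00012abc >> 10) & 0x3fc)
 * i.e. the coarse table base plus VA[19:12] * 4. A 4k page descriptor of
 * 0x6abcd002 then gives phys_addr = 0x6abcd000 | 0xabc = 0x6abcdabc and
 * *page_size = 0x1000, after which the AP/domain checks above are applied.
 */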
  10840
  10841static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
  10842                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
  10843                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
  10844                             target_ulong *page_size, ARMMMUFaultInfo *fi)
  10845{
  10846    CPUState *cs = env_cpu(env);
  10847    ARMCPU *cpu = env_archcpu(env);
  10848    int level = 1;
  10849    uint32_t table;
  10850    uint32_t desc;
  10851    uint32_t xn;
  10852    uint32_t pxn = 0;
  10853    int type;
  10854    int ap;
  10855    int domain = 0;
  10856    int domain_prot;
  10857    hwaddr phys_addr;
  10858    uint32_t dacr;
  10859    bool ns;
  10860
  10861    /* Pagetable walk.  */
  10862    /* Lookup l1 descriptor.  */
  10863    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
  10864        /* Section translation fault if page walk is disabled by PD0 or PD1 */
  10865        fi->type = ARMFault_Translation;
  10866        goto do_fault;
  10867    }
  10868    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
  10869                       mmu_idx, fi);
  10870    if (fi->type != ARMFault_None) {
  10871        goto do_fault;
  10872    }
  10873    type = (desc & 3);
  10874    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
  10875        /* Section translation fault, or attempt to use the encoding
  10876         * which is Reserved on implementations without PXN.
  10877         */
  10878        fi->type = ARMFault_Translation;
  10879        goto do_fault;
  10880    }
  10881    if ((type == 1) || !(desc & (1 << 18))) {
  10882        /* Page or Section.  */
  10883        domain = (desc >> 5) & 0x0f;
  10884    }
  10885    if (regime_el(env, mmu_idx) == 1) {
  10886        dacr = env->cp15.dacr_ns;
  10887    } else {
  10888        dacr = env->cp15.dacr_s;
  10889    }
  10890    if (type == 1) {
  10891        level = 2;
  10892    }
  10893    domain_prot = (dacr >> (domain * 2)) & 3;
  10894    if (domain_prot == 0 || domain_prot == 2) {
  10895        /* Section or Page domain fault */
  10896        fi->type = ARMFault_Domain;
  10897        goto do_fault;
  10898    }
  10899    if (type != 1) {
  10900        if (desc & (1 << 18)) {
  10901            /* Supersection.  */
  10902            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
  10903            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
  10904            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
  10905            *page_size = 0x1000000;
  10906        } else {
  10907            /* Section.  */
  10908            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
  10909            *page_size = 0x100000;
  10910        }
  10911        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
  10912        xn = desc & (1 << 4);
  10913        pxn = desc & 1;
  10914        ns = extract32(desc, 19, 1);
  10915    } else {
  10916        if (cpu_isar_feature(aa32_pxn, cpu)) {
  10917            pxn = (desc >> 2) & 1;
  10918        }
  10919        ns = extract32(desc, 3, 1);
  10920        /* Lookup l2 entry.  */
  10921        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
  10922        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
  10923                           mmu_idx, fi);
  10924        if (fi->type != ARMFault_None) {
  10925            goto do_fault;
  10926        }
  10927        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
  10928        switch (desc & 3) {
  10929        case 0: /* Page translation fault.  */
  10930            fi->type = ARMFault_Translation;
  10931            goto do_fault;
  10932        case 1: /* 64k page.  */
  10933            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
  10934            xn = desc & (1 << 15);
  10935            *page_size = 0x10000;
  10936            break;
  10937        case 2: case 3: /* 4k page.  */
  10938            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
  10939            xn = desc & 1;
  10940            *page_size = 0x1000;
  10941            break;
  10942        default:
  10943            /* Never happens, but compiler isn't smart enough to tell.  */
  10944            abort();
  10945        }
  10946    }
  10947    if (domain_prot == 3) {
  10948        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
  10949    } else {
  10950        if (pxn && !regime_is_user(env, mmu_idx)) {
  10951            xn = 1;
  10952        }
  10953        if (xn && access_type == MMU_INST_FETCH) {
  10954            fi->type = ARMFault_Permission;
  10955            goto do_fault;
  10956        }
  10957
  10958        if (arm_feature(env, ARM_FEATURE_V6K) &&
  10959                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
  10960            /* The simplified model uses AP[0] as an access control bit.  */
  10961            if ((ap & 1) == 0) {
  10962                /* Access flag fault.  */
  10963                fi->type = ARMFault_AccessFlag;
  10964                goto do_fault;
  10965            }
  10966            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
  10967        } else {
  10968            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
  10969        }
  10970        if (*prot && !xn) {
  10971            *prot |= PAGE_EXEC;
  10972        }
  10973        if (!(*prot & (1 << access_type))) {
  10974            /* Access permission fault.  */
  10975            fi->type = ARMFault_Permission;
  10976            goto do_fault;
  10977        }
  10978    }
  10979    if (ns) {
  10980        /* The NS bit will (as required by the architecture) have no effect if
  10981         * the CPU doesn't support TZ or this is a non-secure translation
  10982         * regime, because the attribute will already be non-secure.
  10983         */
  10984        attrs->secure = false;
  10985    }
  10986    *phys_ptr = phys_addr;
  10987    return false;
  10988do_fault:
  10989    fi->domain = domain;
  10990    fi->level = level;
  10991    return true;
  10992}
  10993
  10994/*
  10995 * check_s2_mmu_setup
  10996 * @cpu:        ARMCPU
  10997 * @is_aa64:    True if the translation regime is in AArch64 state
  10998 * @level:      Suggested starting level
  10999 * @inputsize:  Bitsize of IPAs
  11000 * @stride:     Page-table stride (See the ARM ARM)
  11001 *
  11002 * Returns true if the suggested S2 translation parameters are OK and
  11003 * false otherwise.
  11004 */
  11005static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
  11006                               int inputsize, int stride)
  11007{
  11008    const int grainsize = stride + 3;
  11009    int startsizecheck;
  11010
  11011    /* Negative levels are never allowed.  */
  11012    if (level < 0) {
  11013        return false;
  11014    }
  11015
  11016    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
  11017    if (startsizecheck < 1 || startsizecheck > stride + 4) {
  11018        return false;
  11019    }
  11020
  11021    if (is_aa64) {
  11022        CPUARMState *env = &cpu->env;
  11023        unsigned int pamax = arm_pamax(cpu);
  11024
  11025        switch (stride) {
  11026        case 13: /* 64KB Pages.  */
  11027            if (level == 0 || (level == 1 && pamax <= 42)) {
  11028                return false;
  11029            }
  11030            break;
  11031        case 11: /* 16KB Pages.  */
  11032            if (level == 0 || (level == 1 && pamax <= 40)) {
  11033                return false;
  11034            }
  11035            break;
  11036        case 9: /* 4KB Pages.  */
  11037            if (level == 0 && pamax <= 42) {
  11038                return false;
  11039            }
  11040            break;
  11041        default:
  11042            g_assert_not_reached();
  11043        }
  11044
  11045        /* Inputsize checks.  */
  11046        if (inputsize > pamax &&
  11047            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
  11048            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
  11049            return false;
  11050        }
  11051    } else {
  11052        /* AArch32 only supports 4KB pages. Assert on that.  */
  11053        assert(stride == 9);
  11054
  11055        if (level == 0) {
  11056            return false;
  11057        }
  11058    }
  11059    return true;
  11060}
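/*
 * Worked example (editorial sketch): 4KB granule (stride == 9, so
 * grainsize == 12), a 40-bit IPA space and a suggested starting level of 1:
 *   startsizecheck = 40 - ((3 - 1) * 9 + 12) = 10
 * which lies within [1, stride + 4] = [1, 13], so the setup is accepted
 * (the pamax checks permitting). A suggested level of 2 instead gives
 * 40 - (9 + 12) = 19 > 13, and the caller raises a Translation fault.
 */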
  11061
  11062/* Translate from the 4-bit stage 2 representation of
  11063 * memory attributes (without cache-allocation hints) to
  11064 * the 8-bit representation of the stage 1 MAIR registers
  11065 * (which includes allocation hints).
  11066 *
  11067 * ref: shared/translation/attrs/S2AttrDecode()
  11068 *      .../S2ConvertAttrsHints()
  11069 */
  11070static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
  11071{
  11072    uint8_t hiattr = extract32(s2attrs, 2, 2);
  11073    uint8_t loattr = extract32(s2attrs, 0, 2);
  11074    uint8_t hihint = 0, lohint = 0;
  11075
  11076    if (hiattr != 0) { /* normal memory */
  11077        if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */
  11078            hiattr = loattr = 1; /* non-cacheable */
  11079        } else {
  11080            if (hiattr != 1) { /* Write-through or write-back */
  11081                hihint = 3; /* RW allocate */
  11082            }
  11083            if (loattr != 1) { /* Write-through or write-back */
  11084                lohint = 3; /* RW allocate */
  11085            }
  11086        }
  11087    }
  11088
  11089    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
  11090}
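/*
 * Illustrative check (editorial sketch, assuming HCR_EL2.CD is clear):
 * Normal Write-Back inner/outer (s2attrs == 0xf) converts to
 *   (3 << 6) | (3 << 4) | (3 << 2) | 3 == 0xff
 * the MAIR encoding for Normal WB RW-allocate, while Device-nGnRnE
 * (s2attrs == 0) passes through as 0x00. With HCR_EL2.CD set, any Normal
 * input collapses to 0x44 (Normal Non-cacheable).
 */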
  11091#endif /* !CONFIG_USER_ONLY */
  11092
  11093static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
  11094{
  11095    if (regime_has_2_ranges(mmu_idx)) {
  11096        return extract64(tcr, 37, 2);
  11097    } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
  11098        return 0; /* VTCR_EL2 */
  11099    } else {
  11100        /* Replicate the single TBI bit so we always have 2 bits.  */
  11101        return extract32(tcr, 20, 1) * 3;
  11102    }
  11103}
  11104
  11105static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
  11106{
  11107    if (regime_has_2_ranges(mmu_idx)) {
  11108        return extract64(tcr, 51, 2);
  11109    } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
  11110        return 0; /* VTCR_EL2 */
  11111    } else {
  11112        /* Replicate the single TBID bit so we always have 2 bits.  */
  11113        return extract32(tcr, 29, 1) * 3;
  11114    }
  11115}
  11116
  11117static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
  11118{
  11119    if (regime_has_2_ranges(mmu_idx)) {
  11120        return extract64(tcr, 57, 2);
  11121    } else {
  11122        /* Replicate the single TCMA bit so we always have 2 bits.  */
  11123        return extract32(tcr, 30, 1) * 3;
  11124    }
  11125}
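/*
 * Illustrative example (editorial sketch, value hypothetical): in a
 * two-range regime TBI0/TBI1 sit at TCR bits [37] and [38], so a TCR
 * with only bit 38 set makes aa64_va_parameter_tbi() return 2:
 * top-byte-ignore applies to the high (TTBR1) region only, and
 * aa64_va_parameters() below picks out bit "select" from the pair.
 */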
  11126
  11127ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
  11128                                   ARMMMUIdx mmu_idx, bool data)
  11129{
  11130    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
  11131    bool epd, hpd, using16k, using64k;
  11132    int select, tsz, tbi, max_tsz;
  11133
  11134    if (!regime_has_2_ranges(mmu_idx)) {
  11135        select = 0;
  11136        tsz = extract32(tcr, 0, 6);
  11137        using64k = extract32(tcr, 14, 1);
  11138        using16k = extract32(tcr, 15, 1);
  11139        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
  11140            /* VTCR_EL2 */
  11141            hpd = false;
  11142        } else {
  11143            hpd = extract32(tcr, 24, 1);
  11144        }
  11145        epd = false;
  11146    } else {
  11147        /*
  11148         * Bit 55 is always between the two regions, and is canonical for
  11149         * determining if address tagging is enabled.
  11150         */
  11151        select = extract64(va, 55, 1);
  11152        if (!select) {
  11153            tsz = extract32(tcr, 0, 6);
  11154            epd = extract32(tcr, 7, 1);
  11155            using64k = extract32(tcr, 14, 1);
  11156            using16k = extract32(tcr, 15, 1);
  11157            hpd = extract64(tcr, 41, 1);
  11158        } else {
  11159            int tg = extract32(tcr, 30, 2);
  11160            using16k = tg == 1;
  11161            using64k = tg == 3;
  11162            tsz = extract32(tcr, 16, 6);
  11163            epd = extract32(tcr, 23, 1);
  11164            hpd = extract64(tcr, 42, 1);
  11165        }
  11166    }
  11167
  11168    if (cpu_isar_feature(aa64_st, env_archcpu(env))) {
  11169        max_tsz = 48 - using64k;
  11170    } else {
  11171        max_tsz = 39;
  11172    }
  11173
  11174    tsz = MIN(tsz, max_tsz);
  11175    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */
  11176
  11177    /* Present TBI as a composite with TBID.  */
  11178    tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
  11179    if (!data) {
  11180        tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
  11181    }
  11182    tbi = (tbi >> select) & 1;
  11183
  11184    return (ARMVAParameters) {
  11185        .tsz = tsz,
  11186        .select = select,
  11187        .tbi = tbi,
  11188        .epd = epd,
  11189        .hpd = hpd,
  11190        .using16k = using16k,
  11191        .using64k = using64k,
  11192    };
  11193}
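/*
 * Usage sketch (editorial, not called from anywhere): how a caller might
 * decode the EL1&0 parameters for a tagged data address. The helper name
 * is hypothetical and exists only for illustration.
 */
static inline int __attribute__((unused))
example_va_tag_bit(CPUARMState *env, uint64_t va)
{
    ARMVAParameters p = aa64_va_parameters(env, va, ARMMMUIdx_Stage1_E1, true);

    /* p.tbi has already folded TBI with TBID and selected on va[55] */
    return p.tbi;
}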
  11194
  11195#ifndef CONFIG_USER_ONLY
  11196static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
  11197                                          ARMMMUIdx mmu_idx)
  11198{
  11199    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
  11200    uint32_t el = regime_el(env, mmu_idx);
  11201    int select, tsz;
  11202    bool epd, hpd;
  11203
  11204    assert(mmu_idx != ARMMMUIdx_Stage2_S);
  11205
  11206    if (mmu_idx == ARMMMUIdx_Stage2) {
  11207        /* VTCR */
  11208        bool sext = extract32(tcr, 4, 1);
  11209        bool sign = extract32(tcr, 3, 1);
  11210
  11211        /*
  11212         * If the sign-extend bit is not the same as t0sz[3], the result
  11213         * is unpredictable. Flag this as a guest error.
  11214         */
  11215        if (sign != sext) {
  11216            qemu_log_mask(LOG_GUEST_ERROR,
  11217                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
  11218        }
  11219        tsz = sextract32(tcr, 0, 4) + 8;
  11220        select = 0;
  11221        hpd = false;
  11222        epd = false;
  11223    } else if (el == 2) {
  11224        /* HTCR */
  11225        tsz = extract32(tcr, 0, 3);
  11226        select = 0;
  11227        hpd = extract64(tcr, 24, 1);
  11228        epd = false;
  11229    } else {
  11230        int t0sz = extract32(tcr, 0, 3);
  11231        int t1sz = extract32(tcr, 16, 3);
  11232
  11233        if (t1sz == 0) {
  11234            select = va > (0xffffffffu >> t0sz);
  11235        } else {
  11236            /* Note that we will detect errors later.  */
  11237            select = va >= ~(0xffffffffu >> t1sz);
  11238        }
  11239        if (!select) {
  11240            tsz = t0sz;
  11241            epd = extract32(tcr, 7, 1);
  11242            hpd = extract64(tcr, 41, 1);
  11243        } else {
  11244            tsz = t1sz;
  11245            epd = extract32(tcr, 23, 1);
  11246            hpd = extract64(tcr, 42, 1);
  11247        }
  11248        /* For aarch32, hpd0 is not enabled without t2e as well.  */
  11249        hpd &= extract32(tcr, 6, 1);
  11250    }
  11251
  11252    return (ARMVAParameters) {
  11253        .tsz = tsz,
  11254        .select = select,
  11255        .epd = epd,
  11256        .hpd = hpd,
  11257    };
  11258}
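/*
 * Worked example (editorial sketch): AArch32 EL1 with TTBCR.T0SZ == 2 and
 * T1SZ == 0: select is 0 for addresses up to 0x3fffffff (0xffffffffu >> 2)
 * and 1 above that, matching the v7 "boundary at 2^(32 - N)" rule for
 * choosing between TTBR0 and TTBR1.
 */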
  11259
  11260/**
  11261 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
  11262 *
  11263 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
  11264 * prot and page_size may not be filled in, and the populated fault info (@fi)
  11265 * provides information on why the translation aborted, in the format of a
  11266 * long-format DFSR/IFSR fault register, with the following caveat:
  11267 *  * the WnR bit is never set (the caller must do this).
  11268 *
  11269 * @env: CPUARMState
  11270 * @address: virtual address to get physical address for
  11271 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
  11272 * @mmu_idx: MMU index indicating required translation regime
  11273 * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page table
  11274 *             walk), must be true if this is stage 2 of a stage 1+2 walk for an
  11275 *             EL0 access. If @mmu_idx is anything else, @s1_is_el0 is ignored.
  11276 * @phys_ptr: set to the physical address corresponding to the virtual address
  11277 * @attrs: set to the memory transaction attributes to use
  11278 * @prot: set to the permissions for the page containing phys_ptr
  11279 * @page_size_ptr: set to the size of the page containing phys_ptr
  11280 * @fi: set to fault info if the translation fails
  11281 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
  11282 */
  11283static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
  11284                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
  11285                               bool s1_is_el0,
  11286                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
  11287                               target_ulong *page_size_ptr,
  11288                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
  11289{
  11290    ARMCPU *cpu = env_archcpu(env);
  11291    CPUState *cs = CPU(cpu);
  11292    /* Read an LPAE long-descriptor translation table. */
  11293    ARMFaultType fault_type = ARMFault_Translation;
  11294    uint32_t level;
  11295    ARMVAParameters param;
  11296    uint64_t ttbr;
  11297    hwaddr descaddr, indexmask, indexmask_grainsize;
  11298    uint32_t tableattrs;
  11299    target_ulong page_size;
  11300    uint32_t attrs;
  11301    int32_t stride;
  11302    int addrsize, inputsize;
  11303    TCR *tcr = regime_tcr(env, mmu_idx);
  11304    int ap, ns, xn, pxn;
  11305    uint32_t el = regime_el(env, mmu_idx);
  11306    uint64_t descaddrmask;
  11307    bool aarch64 = arm_el_is_aa64(env, el);
  11308    bool guarded = false;
  11309
  11310    /* TODO: This code does not support shareability levels. */
  11311    if (aarch64) {
  11312        param = aa64_va_parameters(env, address, mmu_idx,
  11313                                   access_type != MMU_INST_FETCH);
  11314        level = 0;
  11315        addrsize = 64 - 8 * param.tbi;
  11316        inputsize = 64 - param.tsz;
  11317    } else {
  11318        param = aa32_va_parameters(env, address, mmu_idx);
  11319        level = 1;
  11320        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
  11321        inputsize = addrsize - param.tsz;
  11322    }
  11323
  11324    /*
  11325     * We determined the region when collecting the parameters, but we
  11326     * have not yet validated that the address is valid for the region.
  11327     * Extract the top bits and verify that they all match select.
  11328     *
  11329     * For aa32, if inputsize == addrsize, then we have selected the
  11330     * region by exclusion in aa32_va_parameters and there is no more
  11331     * validation to do here.
  11332     */
  11333    if (inputsize < addrsize) {
  11334        target_ulong top_bits = sextract64(address, inputsize,
  11335                                           addrsize - inputsize);
  11336        if (-top_bits != param.select) {
  11337            /* The gap between the two regions is a Translation fault */
  11338            fault_type = ARMFault_Translation;
  11339            goto do_fault;
  11340        }
  11341    }
  11342
  11343    if (param.using64k) {
  11344        stride = 13;
  11345    } else if (param.using16k) {
  11346        stride = 11;
  11347    } else {
  11348        stride = 9;
  11349    }
  11350
  11351    /* Note that QEMU ignores shareability and cacheability attributes,
  11352     * so we don't need to do anything with the SH, ORGN, IRGN fields
  11353     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
  11354     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
  11355     * implement any ASID-like capability so we can ignore it (instead
  11356     * we will always flush the TLB any time the ASID is changed).
  11357     */
  11358    ttbr = regime_ttbr(env, mmu_idx, param.select);
  11359
  11360    /* Here we should have set up all the parameters for the translation:
  11361     * inputsize, ttbr, epd, stride, tbi
  11362     */
  11363
  11364    if (param.epd) {
  11365        /* Translation table walk disabled => Translation fault on TLB miss
  11366         * Note: This is always 0 on 64-bit EL2 and EL3.
  11367         */
  11368        goto do_fault;
  11369    }
  11370
  11371    if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
  11372        /* The starting level depends on the virtual address size (which can
  11373         * be up to 48 bits) and the translation granule size. It indicates
  11374         * the number of strides (stride bits at a time) needed to
  11375         * consume the bits of the input address. In the pseudocode this is:
  11376         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
  11377         * where their 'inputsize' is our 'inputsize', 'grainsize' is
  11378         * our 'stride + 3' and 'stride' is our 'stride'.
  11379         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
  11380         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
  11381         * = 4 - (inputsize - 4) / stride;
  11382         */
  11383        level = 4 - (inputsize - 4) / stride;
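        /*
         * Worked example (editorial sketch): inputsize == 48 with 4KB pages
         * (stride == 9) gives level = 4 - 44 / 9 = 0, a full four-level
         * walk, while inputsize == 30 with 64KB pages (stride == 13) gives
         * level = 4 - 2 = 2, so the walk starts at level 2.
         */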
  11384    } else {
  11385        /* For stage 2 translations the starting level is specified by the
  11386         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
  11387         */
  11388        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
  11389        uint32_t startlevel;
  11390        bool ok;
  11391
  11392        if (!aarch64 || stride == 9) {
  11393            /* AArch32 or 4KB pages */
  11394            startlevel = 2 - sl0;
  11395
  11396            if (cpu_isar_feature(aa64_st, cpu)) {
  11397                startlevel &= 3;
  11398            }
  11399        } else {
  11400            /* 16KB or 64KB pages */
  11401            startlevel = 3 - sl0;
  11402        }
  11403
  11404        /* Check that the starting level is valid. */
  11405        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
  11406                                inputsize, stride);
  11407        if (!ok) {
  11408            fault_type = ARMFault_Translation;
  11409            goto do_fault;
  11410        }
  11411        level = startlevel;
  11412    }
  11413
  11414    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
  11415    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
  11416
  11417    /* Now we can extract the actual base address from the TTBR */
  11418    descaddr = extract64(ttbr, 0, 48);
  11419    /*
  11420     * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
  11421     * and also to mask out CnP (bit 0) which could validly be non-zero.
  11422     */
  11423    descaddr &= ~indexmask;
  11424
  11425    /* The address field in the descriptor goes up to bit 39 for ARMv7
  11426     * but up to bit 47 for ARMv8; we only use descaddrmask up to bit 39
  11427     * for AArch32, because the higher bits are not needed to construct
  11428     * the next descriptor address in that case (they should all be zero).
  11429     */
  11430    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
  11431                   ~indexmask_grainsize;
  11432
  11433    /* Secure accesses start with the page table in secure memory and
  11434     * can be downgraded to non-secure at any step. Non-secure accesses
  11435     * remain non-secure. We implement this by just ORing in the NSTable/NS
  11436     * bits at each step.
  11437     */
  11438    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
  11439    for (;;) {
  11440        uint64_t descriptor;
  11441        bool nstable;
  11442
  11443        descaddr |= (address >> (stride * (4 - level))) & indexmask;
  11444        descaddr &= ~7ULL;
  11445        nstable = extract32(tableattrs, 4, 1);
  11446        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
  11447        if (fi->type != ARMFault_None) {
  11448            goto do_fault;
  11449        }
  11450
  11451        if (!(descriptor & 1) ||
  11452            (!(descriptor & 2) && (level == 3))) {
  11453            /* Invalid, or the Reserved level 3 encoding */
  11454            goto do_fault;
  11455        }
  11456        descaddr = descriptor & descaddrmask;
  11457
  11458        if ((descriptor & 2) && (level < 3)) {
  11459            /* Table entry. The top five bits are attributes which may
  11460             * propagate down through lower levels of the table (and
  11461             * which are all arranged so that 0 means "no effect", so
  11462             * we can gather them up by ORing in the bits at each level).
  11463             */
  11464            tableattrs |= extract64(descriptor, 59, 5);
  11465            level++;
  11466            indexmask = indexmask_grainsize;
  11467            continue;
  11468        }
  11469        /* Block entry at level 1 or 2, or page entry at level 3.
  11470         * These are basically the same thing, although the number
  11471         * of bits we pull in from the vaddr varies.
  11472         */
  11473        page_size = (1ULL << ((stride * (4 - level)) + 3));
  11474        descaddr |= (address & (page_size - 1));
  11475        /* Extract attributes from the descriptor */
  11476        attrs = extract64(descriptor, 2, 10)
  11477            | (extract64(descriptor, 52, 12) << 10);
  11478
  11479        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
  11480            /* Stage 2 table descriptors do not include any attribute fields */
  11481            break;
  11482        }
  11483        /* Merge in attributes from table descriptors */
  11484        attrs |= nstable << 3; /* NS */
  11485        guarded = extract64(descriptor, 50, 1);  /* GP */
  11486        if (param.hpd) {
  11487            /* HPD disables all the table attributes except NSTable.  */
  11488            break;
  11489        }
  11490        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
  11491        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
  11492         * means "force PL1 access only", which means forcing AP[1] to 0.
  11493         */
  11494        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
  11495        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
  11496        break;
  11497    }
  11498    /* Here descaddr is the final physical address, and attributes
  11499     * are all in attrs.
  11500     */
  11501    fault_type = ARMFault_AccessFlag;
  11502    if ((attrs & (1 << 8)) == 0) {
  11503        /* Access flag */
  11504        goto do_fault;
  11505    }
  11506
  11507    ap = extract32(attrs, 4, 2);
  11508
  11509    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
  11510        ns = mmu_idx == ARMMMUIdx_Stage2;
  11511        xn = extract32(attrs, 11, 2);
  11512        *prot = get_S2prot(env, ap, xn, s1_is_el0);
  11513    } else {
  11514        ns = extract32(attrs, 3, 1);
  11515        xn = extract32(attrs, 12, 1);
  11516        pxn = extract32(attrs, 11, 1);
  11517        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
  11518    }
  11519
  11520    fault_type = ARMFault_Permission;
  11521    if (!(*prot & (1 << access_type))) {
  11522        goto do_fault;
  11523    }
  11524
  11525    if (ns) {
  11526        /* The NS bit will (as required by the architecture) have no effect if
  11527         * the CPU doesn't support TZ or this is a non-secure translation
  11528         * regime, because the attribute will already be non-secure.
  11529         */
  11530        txattrs->secure = false;
  11531    }
  11532    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
  11533    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
  11534        arm_tlb_bti_gp(txattrs) = true;
  11535    }
  11536
  11537    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
  11538        cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4));
  11539    } else {
  11540        /* Index into MAIR registers for cache attributes */
  11541        uint8_t attrindx = extract32(attrs, 0, 3);
  11542        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
  11543        assert(attrindx <= 7);
  11544        cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
  11545    }
  11546    cacheattrs->shareability = extract32(attrs, 6, 2);
  11547
  11548    *phys_ptr = descaddr;
  11549    *page_size_ptr = page_size;
  11550    return false;
  11551
  11552do_fault:
  11553    fi->type = fault_type;
  11554    fi->level = level;
  11555    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
  11556    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
  11557                               mmu_idx == ARMMMUIdx_Stage2_S);
  11558    fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
  11559    return true;
  11560}
  11561
  11562static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
  11563                                                ARMMMUIdx mmu_idx,
  11564                                                int32_t address, int *prot)
  11565{
  11566    if (!arm_feature(env, ARM_FEATURE_M)) {
  11567        *prot = PAGE_READ | PAGE_WRITE;
  11568        switch (address) {
  11569        case 0xF0000000 ... 0xFFFFFFFF:
  11570            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
  11571                /* hivecs: executing from the high vectors region is OK */
  11572                *prot |= PAGE_EXEC;
  11573            }
  11574            break;
  11575        case 0x00000000 ... 0x7FFFFFFF:
  11576            *prot |= PAGE_EXEC;
  11577            break;
  11578        }
  11579    } else {
  11580        /* Default system address map for M profile cores.
  11581         * The architecture specifies which regions are execute-never;
  11582         * at the MPU level no other checks are defined.
  11583         */
  11584        switch (address) {
  11585        case 0x00000000 ... 0x1fffffff: /* ROM */
  11586        case 0x20000000 ... 0x3fffffff: /* SRAM */
  11587        case 0x60000000 ... 0x7fffffff: /* RAM */
  11588        case 0x80000000 ... 0x9fffffff: /* RAM */
  11589            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
  11590            break;
  11591        case 0x40000000 ... 0x5fffffff: /* Peripheral */
  11592        case 0xa0000000 ... 0xbfffffff: /* Device */
  11593        case 0xc0000000 ... 0xdfffffff: /* Device */
  11594        case 0xe0000000 ... 0xffffffff: /* System */
  11595            *prot = PAGE_READ | PAGE_WRITE;
  11596            break;
  11597        default:
  11598            g_assert_not_reached();
  11599        }
  11600    }
  11601}
  11602
  11603static bool pmsav7_use_background_region(ARMCPU *cpu,
  11604                                         ARMMMUIdx mmu_idx, bool is_user)
  11605{
  11606    /* Return true if we should use the default memory map as a
  11607     * "background" region if there are no hits against any MPU regions.
  11608     */
  11609    CPUARMState *env = &cpu->env;
  11610
  11611    if (is_user) {
  11612        return false;
  11613    }
  11614
  11615    if (arm_feature(env, ARM_FEATURE_M)) {
  11616        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
  11617            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
  11618    } else {
  11619        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
  11620    }
  11621}
  11622
  11623static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
  11624{
  11625    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
  11626    return arm_feature(env, ARM_FEATURE_M) &&
  11627        extract32(address, 20, 12) == 0xe00;
  11628}
  11629
  11630static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
  11631{
  11632    /* True if address is in the M profile system region
  11633     * 0xe0000000 - 0xffffffff
  11634     */
  11635    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
  11636}
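/*
 * Illustrative check (editorial sketch, addresses hypothetical):
 * 0xe000ed08 has address[31:20] == 0xe00, so m_is_ppb_region() is true;
 * 0xe0100000 gives 0xe01 and is not PPB, but its address[31:29] == 0x7,
 * so m_is_system_region() still covers it as part of the
 * 0xe0000000..0xffffffff system space.
 */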
  11637
  11638static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
  11639                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
  11640                                 hwaddr *phys_ptr, int *prot,
  11641                                 target_ulong *page_size,
  11642                                 ARMMMUFaultInfo *fi)
  11643{
  11644    ARMCPU *cpu = env_archcpu(env);
  11645    int n;
  11646    bool is_user = regime_is_user(env, mmu_idx);
  11647
  11648    *phys_ptr = address;
  11649    *page_size = TARGET_PAGE_SIZE;
  11650    *prot = 0;
  11651
  11652    if (regime_translation_disabled(env, mmu_idx) ||
  11653        m_is_ppb_region(env, address)) {
  11654        /* MPU disabled or M profile PPB access: use default memory map.
  11655         * The other case which uses the default memory map in the
  11656         * v7M ARM ARM pseudocode is exception vector reads from the vector
  11657         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
  11658         * which always does a direct read using address_space_ldl(), rather
  11659         * than going via this function, so we don't need to check that here.
  11660         */
  11661        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
  11662    } else { /* MPU enabled */
  11663        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
  11664            /* region search */
  11665            uint32_t base = env->pmsav7.drbar[n];
  11666            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
  11667            uint32_t rmask;
  11668            bool srdis = false;
  11669
  11670            if (!(env->pmsav7.drsr[n] & 0x1)) {
  11671                continue;
  11672            }
  11673
  11674            if (!rsize) {
  11675                qemu_log_mask(LOG_GUEST_ERROR,
  11676                              "DRSR[%d]: Rsize field cannot be 0\n", n);
  11677                continue;
  11678            }
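            /*
             * DRSR.Rsize encodes the region as 2^(Rsize+1) bytes, so after
             * the increment below 'rsize' is log2 of the region size and
             * 'rmask' covers the offset bits within the region.
             */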
  11679            rsize++;
  11680            rmask = (1ull << rsize) - 1;
  11681
  11682            if (base & rmask) {
  11683                qemu_log_mask(LOG_GUEST_ERROR,
  11684                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
  11685                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
  11686                              n, base, rmask);
  11687                continue;
  11688            }
  11689
  11690            if (address < base || address > base + rmask) {
  11691                /*
  11692                 * Address not in this region. We must check whether the
  11693                 * region covers addresses in the same page as our address.
  11694                 * In that case we must not report a size that covers the
  11695                 * whole page for a subsequent hit against a different MPU
  11696                 * region or the background region, because it would result in
  11697                 * incorrect TLB hits for subsequent accesses to addresses that
  11698                 * are in this MPU region.
  11699                 */
  11700                if (ranges_overlap(base, rmask,
  11701                                   address & TARGET_PAGE_MASK,
  11702                                   TARGET_PAGE_SIZE)) {
  11703                    *page_size = 1;
  11704                }
  11705                continue;
  11706            }
  11707
  11708            /* Region matched */
  11709
  11710            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
  11711                int i, snd;
  11712                uint32_t srdis_mask;
  11713
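                /*
                 * An enabled region of 256 bytes or more is divided into
                 * 8 equal subregions, which DRSR bits [15:8] (SRD) can
                 * disable individually.
                 */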
  11714                rsize -= 3; /* sub region size (power of 2) */
  11715                snd = ((address - base) >> rsize) & 0x7;
  11716                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
  11717
  11718                srdis_mask = srdis ? 0x3 : 0x0;
  11719                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
  11720                    /* This will check in groups of 2, 4 and then 8, whether
  11721                     * the subregion bits are consistent. rsize is incremented
  11722                     * back up to give the region size, considering consistent
  11723                     * adjacent subregions as one region. Stop testing if rsize
  11724                     * is already big enough for an entire QEMU page.
  11725                     */
  11726                    int snd_rounded = snd & ~(i - 1);
  11727                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
  11728                                                     snd_rounded + 8, i);
  11729                    if (srdis_mask ^ srdis_multi) {
  11730                        break;
  11731                    }
  11732                    srdis_mask = (srdis_mask << i) | srdis_mask;
  11733                    rsize++;
  11734                }
  11735            }
  11736            if (srdis) {
  11737                continue;
  11738            }
  11739            if (rsize < TARGET_PAGE_BITS) {
  11740                *page_size = 1 << rsize;
  11741            }
  11742            break;
  11743        }
  11744
  11745        if (n == -1) { /* no hits */
  11746            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
  11747                /* background fault */
  11748                fi->type = ARMFault_Background;
  11749                return true;
  11750            }
  11751            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* an MPU hit! */
  11753            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
  11754            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
  11755
  11756            if (m_is_system_region(env, address)) {
  11757                /* System space is always execute never */
  11758                xn = 1;
  11759            }
  11760
  11761            if (is_user) { /* User mode AP bit decoding */
  11762                switch (ap) {
  11763                case 0:
  11764                case 1:
  11765                case 5:
  11766                    break; /* no access */
  11767                case 3:
  11768                    *prot |= PAGE_WRITE;
  11769                    /* fall through */
  11770                case 2:
  11771                case 6:
  11772                    *prot |= PAGE_READ | PAGE_EXEC;
  11773                    break;
  11774                case 7:
  11775                    /* for v7M, same as 6; for R profile a reserved value */
  11776                    if (arm_feature(env, ARM_FEATURE_M)) {
  11777                        *prot |= PAGE_READ | PAGE_EXEC;
  11778                        break;
  11779                    }
  11780                    /* fall through */
  11781                default:
  11782                    qemu_log_mask(LOG_GUEST_ERROR,
  11783                                  "DRACR[%d]: Bad value for AP bits: 0x%"
  11784                                  PRIx32 "\n", n, ap);
  11785                }
  11786            } else { /* Priv. mode AP bits decoding */
  11787                switch (ap) {
  11788                case 0:
  11789                    break; /* no access */
  11790                case 1:
  11791                case 2:
  11792                case 3:
  11793                    *prot |= PAGE_WRITE;
  11794                    /* fall through */
  11795                case 5:
  11796                case 6:
  11797                    *prot |= PAGE_READ | PAGE_EXEC;
  11798                    break;
  11799                case 7:
  11800                    /* for v7M, same as 6; for R profile a reserved value */
  11801                    if (arm_feature(env, ARM_FEATURE_M)) {
  11802                        *prot |= PAGE_READ | PAGE_EXEC;
  11803                        break;
  11804                    }
  11805                    /* fall through */
  11806                default:
  11807                    qemu_log_mask(LOG_GUEST_ERROR,
  11808                                  "DRACR[%d]: Bad value for AP bits: 0x%"
  11809                                  PRIx32 "\n", n, ap);
  11810                }
  11811            }
  11812
  11813            /* execute never */
  11814            if (xn) {
  11815                *prot &= ~PAGE_EXEC;
  11816            }
  11817        }
  11818    }
  11819
  11820    fi->type = ARMFault_Permission;
  11821    fi->level = 1;
  11822    return !(*prot & (1 << access_type));
  11823}
  11824
  11825static bool v8m_is_sau_exempt(CPUARMState *env,
  11826                              uint32_t address, MMUAccessType access_type)
  11827{
  11828    /* The architecture specifies that certain address ranges are
  11829     * exempt from v8M SAU/IDAU checks.
  11830     */
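    /*
     * The exempt ranges below are the M profile debug/private peripheral
     * space: e.g. 0xe000e000..0xe000efff is the System Control Space and
     * 0xe00ff000..0xe00fffff the ROM table.
     */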
  11831    return
  11832        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
  11833        (address >= 0xe0000000 && address <= 0xe0002fff) ||
  11834        (address >= 0xe000e000 && address <= 0xe000efff) ||
  11835        (address >= 0xe002e000 && address <= 0xe002efff) ||
  11836        (address >= 0xe0040000 && address <= 0xe0041fff) ||
  11837        (address >= 0xe00ff000 && address <= 0xe00fffff);
  11838}
  11839
  11840void v8m_security_lookup(CPUARMState *env, uint32_t address,
  11841                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
  11842                                V8M_SAttributes *sattrs)
  11843{
  11844    /* Look up the security attributes for this address. Compare the
  11845     * pseudocode SecurityCheck() function.
  11846     * We assume the caller has zero-initialized *sattrs.
  11847     */
  11848    ARMCPU *cpu = env_archcpu(env);
  11849    int r;
  11850    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
  11851    int idau_region = IREGION_NOTVALID;
  11852    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
  11853    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
  11854
  11855    if (cpu->idau) {
  11856        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
  11857        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
  11858
  11859        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
  11860                   &idau_nsc);
  11861    }
  11862
  11863    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
  11864        /* 0xf0000000..0xffffffff is always S for insn fetches */
  11865        return;
  11866    }
  11867
  11868    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
  11869        sattrs->ns = !regime_is_secure(env, mmu_idx);
  11870        return;
  11871    }
  11872
  11873    if (idau_region != IREGION_NOTVALID) {
  11874        sattrs->irvalid = true;
  11875        sattrs->iregion = idau_region;
  11876    }
  11877
  11878    switch (env->sau.ctrl & 3) {
  11879    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
  11880        break;
  11881    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
  11882        sattrs->ns = true;
  11883        break;
  11884    default: /* SAU.ENABLE == 1 */
  11885        for (r = 0; r < cpu->sau_sregion; r++) {
  11886            if (env->sau.rlar[r] & 1) {
  11887                uint32_t base = env->sau.rbar[r] & ~0x1f;
  11888                uint32_t limit = env->sau.rlar[r] | 0x1f;
  11889
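                /*
                 * SAU regions are 32-byte granular: RBAR supplies base
                 * address bits [31:5] and RLAR the inclusive limit, with
                 * bits [4:0] reading as ones.
                 */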
  11890                if (base <= address && limit >= address) {
  11891                    if (base > addr_page_base || limit < addr_page_limit) {
  11892                        sattrs->subpage = true;
  11893                    }
  11894                    if (sattrs->srvalid) {
  11895                        /* If we hit in more than one region then we must report
  11896                         * as Secure, not NS-Callable, with no valid region
  11897                         * number info.
  11898                         */
  11899                        sattrs->ns = false;
  11900                        sattrs->nsc = false;
  11901                        sattrs->sregion = 0;
  11902                        sattrs->srvalid = false;
  11903                        break;
  11904                    } else {
  11905                        if (env->sau.rlar[r] & 2) {
  11906                            sattrs->nsc = true;
  11907                        } else {
  11908                            sattrs->ns = true;
  11909                        }
  11910                        sattrs->srvalid = true;
  11911                        sattrs->sregion = r;
  11912                    }
  11913                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different SAU
                     * region, because it would result in incorrect TLB hits
                     * for subsequent accesses to addresses that are in this
                     * SAU region.
                     */
  11923                    if (limit >= base &&
  11924                        ranges_overlap(base, limit - base + 1,
  11925                                       addr_page_base,
  11926                                       TARGET_PAGE_SIZE)) {
  11927                        sattrs->subpage = true;
  11928                    }
  11929                }
  11930            }
  11931        }
  11932        break;
  11933    }
  11934
  11935    /*
  11936     * The IDAU will override the SAU lookup results if it specifies
  11937     * higher security than the SAU does.
  11938     */
  11939    if (!idau_ns) {
  11940        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
  11941            sattrs->ns = false;
  11942            sattrs->nsc = idau_nsc;
  11943        }
  11944    }
  11945}
  11946
  11947bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
  11948                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
  11949                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
  11950                              int *prot, bool *is_subpage,
  11951                              ARMMMUFaultInfo *fi, uint32_t *mregion)
  11952{
  11953    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
  11954     * that a full phys-to-virt translation does).
  11955     * mregion is (if not NULL) set to the region number which matched,
  11956     * or -1 if no region number is returned (MPU off, address did not
  11957     * hit a region, address hit in multiple regions).
  11958     * We set is_subpage to true if the region hit doesn't cover the
  11959     * entire TARGET_PAGE the address is within.
  11960     */
  11961    ARMCPU *cpu = env_archcpu(env);
  11962    bool is_user = regime_is_user(env, mmu_idx);
  11963    uint32_t secure = regime_is_secure(env, mmu_idx);
  11964    int n;
  11965    int matchregion = -1;
  11966    bool hit = false;
  11967    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
  11968    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
  11969
  11970    *is_subpage = false;
  11971    *phys_ptr = address;
  11972    *prot = 0;
  11973    if (mregion) {
  11974        *mregion = -1;
  11975    }
  11976
  11977    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
  11978     * was an exception vector read from the vector table (which is always
  11979     * done using the default system address map), because those accesses
  11980     * are done in arm_v7m_load_vector(), which always does a direct
  11981     * read using address_space_ldl(), rather than going via this function.
  11982     */
  11983    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
  11984        hit = true;
  11985    } else if (m_is_ppb_region(env, address)) {
  11986        hit = true;
  11987    } else {
  11988        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
  11989            hit = true;
  11990        }
  11991
  11992        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
  11993            /* region search */
  11994            /* Note that the base address is bits [31:5] from the register
  11995             * with bits [4:0] all zeroes, but the limit address is bits
  11996             * [31:5] from the register with bits [4:0] all ones.
  11997             */
  11998            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
  11999            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
  12000
  12001            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
  12002                /* Region disabled */
  12003                continue;
  12004            }
  12005
  12006            if (address < base || address > limit) {
  12007                /*
  12008                 * Address not in this region. We must check whether the
  12009                 * region covers addresses in the same page as our address.
  12010                 * In that case we must not report a size that covers the
  12011                 * whole page for a subsequent hit against a different MPU
  12012                 * region or the background region, because it would result in
  12013                 * incorrect TLB hits for subsequent accesses to addresses that
  12014                 * are in this MPU region.
  12015                 */
  12016                if (limit >= base &&
  12017                    ranges_overlap(base, limit - base + 1,
  12018                                   addr_page_base,
  12019                                   TARGET_PAGE_SIZE)) {
  12020                    *is_subpage = true;
  12021                }
  12022                continue;
  12023            }
  12024
  12025            if (base > addr_page_base || limit < addr_page_limit) {
  12026                *is_subpage = true;
  12027            }
  12028
  12029            if (matchregion != -1) {
  12030                /* Multiple regions match -- always a failure (unlike
  12031                 * PMSAv7 where highest-numbered-region wins)
  12032                 */
  12033                fi->type = ARMFault_Permission;
  12034                fi->level = 1;
  12035                return true;
  12036            }
  12037
  12038            matchregion = n;
  12039            hit = true;
  12040        }
  12041    }
  12042
  12043    if (!hit) {
  12044        /* background fault */
  12045        fi->type = ARMFault_Background;
  12046        return true;
  12047    }
  12048
  12049    if (matchregion == -1) {
  12050        /* hit using the background region */
  12051        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
  12052    } else {
  12053        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
  12054        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
  12055        bool pxn = false;
  12056
  12057        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
  12058            pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
  12059        }
  12060
  12061        if (m_is_system_region(env, address)) {
  12062            /* System space is always execute never */
  12063            xn = 1;
  12064        }
  12065
  12066        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
  12067        if (*prot && !xn && !(pxn && !is_user)) {
  12068            *prot |= PAGE_EXEC;
  12069        }
  12070        /* We don't need to look the attribute up in the MAIR0/MAIR1
  12071         * registers because that only tells us about cacheability.
  12072         */
  12073        if (mregion) {
  12074            *mregion = matchregion;
  12075        }
  12076    }
  12077
  12078    fi->type = ARMFault_Permission;
  12079    fi->level = 1;
  12080    return !(*prot & (1 << access_type));
  12081}
  12082
  12083
  12084static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
  12085                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
  12086                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
  12087                                 int *prot, target_ulong *page_size,
  12088                                 ARMMMUFaultInfo *fi)
  12089{
  12090    uint32_t secure = regime_is_secure(env, mmu_idx);
  12091    V8M_SAttributes sattrs = {};
  12092    bool ret;
  12093    bool mpu_is_subpage;
  12094
  12095    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
  12096        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
  12097        if (access_type == MMU_INST_FETCH) {
  12098            /* Instruction fetches always use the MMU bank and the
  12099             * transaction attribute determined by the fetch address,
  12100             * regardless of CPU state. This is painful for QEMU
  12101             * to handle, because it would mean we need to encode
  12102             * into the mmu_idx not just the (user, negpri) information
  12103             * for the current security state but also that for the
  12104             * other security state, which would balloon the number
  12105             * of mmu_idx values needed alarmingly.
  12106             * Fortunately we can avoid this because it's not actually
  12107             * possible to arbitrarily execute code from memory with
  12108             * the wrong security attribute: it will always generate
  12109             * an exception of some kind or another, apart from the
  12110             * special case of an NS CPU executing an SG instruction
  12111             * in S&NSC memory. So we always just fail the translation
  12112             * here and sort things out in the exception handler
  12113             * (including possibly emulating an SG instruction).
  12114             */
  12115            if (sattrs.ns != !secure) {
  12116                if (sattrs.nsc) {
  12117                    fi->type = ARMFault_QEMU_NSCExec;
  12118                } else {
  12119                    fi->type = ARMFault_QEMU_SFault;
  12120                }
  12121                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
  12122                *phys_ptr = address;
  12123                *prot = 0;
  12124                return true;
  12125            }
  12126        } else {
  12127            /* For data accesses we always use the MMU bank indicated
  12128             * by the current CPU state, but the security attributes
  12129             * might downgrade a secure access to nonsecure.
  12130             */
  12131            if (sattrs.ns) {
  12132                txattrs->secure = false;
  12133            } else if (!secure) {
  12134                /* NS access to S memory must fault.
  12135                 * Architecturally we should first check whether the
  12136                 * MPU information for this address indicates that we
  12137                 * are doing an unaligned access to Device memory, which
  12138                 * should generate a UsageFault instead. QEMU does not
  12139                 * currently check for that kind of unaligned access though.
  12140                 * If we added it we would need to do so as a special case
  12141                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
  12142                 */
  12143                fi->type = ARMFault_QEMU_SFault;
  12144                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
  12145                *phys_ptr = address;
  12146                *prot = 0;
  12147                return true;
  12148            }
  12149        }
  12150    }
  12151
  12152    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
  12153                            txattrs, prot, &mpu_is_subpage, fi, NULL);
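    /*
     * If either the SAU region or the MPU region covers only part of the
     * containing QEMU page, report a 1-byte page size so that the result
     * is not cached and reused for the rest of the page.
     */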
  12154    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
  12155    return ret;
  12156}
  12157
  12158static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
  12159                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
  12160                                 hwaddr *phys_ptr, int *prot,
  12161                                 ARMMMUFaultInfo *fi)
  12162{
  12163    int n;
  12164    uint32_t mask;
  12165    uint32_t base;
  12166    bool is_user = regime_is_user(env, mmu_idx);
  12167
  12168    if (regime_translation_disabled(env, mmu_idx)) {
  12169        /* MPU disabled.  */
  12170        *phys_ptr = address;
  12171        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
  12172        return false;
  12173    }
  12174
  12175    *phys_ptr = address;
  12176    for (n = 7; n >= 0; n--) {
  12177        base = env->cp15.c6_region[n];
  12178        if ((base & 1) == 0) {
  12179            continue;
  12180        }
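        /*
         * Bits [5:1] of the region register hold the size field; the
         * region spans 2^(size+1) bytes aligned to its base address.
         */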
  12181        mask = 1 << ((base >> 1) & 0x1f);
  12182        /* Keep this shift separate from the above to avoid an
  12183           (undefined) << 32.  */
  12184        mask = (mask << 1) - 1;
  12185        if (((base ^ address) & ~mask) == 0) {
  12186            break;
  12187        }
  12188    }
  12189    if (n < 0) {
  12190        fi->type = ARMFault_Background;
  12191        return true;
  12192    }
  12193
  12194    if (access_type == MMU_INST_FETCH) {
  12195        mask = env->cp15.pmsav5_insn_ap;
  12196    } else {
  12197        mask = env->cp15.pmsav5_data_ap;
  12198    }
  12199    mask = (mask >> (n * 4)) & 0xf;
  12200    switch (mask) {
  12201    case 0:
  12202        fi->type = ARMFault_Permission;
  12203        fi->level = 1;
  12204        return true;
  12205    case 1:
  12206        if (is_user) {
  12207            fi->type = ARMFault_Permission;
  12208            fi->level = 1;
  12209            return true;
  12210        }
  12211        *prot = PAGE_READ | PAGE_WRITE;
  12212        break;
  12213    case 2:
  12214        *prot = PAGE_READ;
  12215        if (!is_user) {
  12216            *prot |= PAGE_WRITE;
  12217        }
  12218        break;
  12219    case 3:
  12220        *prot = PAGE_READ | PAGE_WRITE;
  12221        break;
  12222    case 5:
  12223        if (is_user) {
  12224            fi->type = ARMFault_Permission;
  12225            fi->level = 1;
  12226            return true;
  12227        }
  12228        *prot = PAGE_READ;
  12229        break;
  12230    case 6:
  12231        *prot = PAGE_READ;
  12232        break;
  12233    default:
  12234        /* Bad permission.  */
  12235        fi->type = ARMFault_Permission;
  12236        fi->level = 1;
  12237        return true;
  12238    }
  12239    *prot |= PAGE_EXEC;
  12240    return false;
  12241}
  12242
  12243/* Combine either inner or outer cacheability attributes for normal
  12244 * memory, according to table D4-42 and pseudocode procedure
  12245 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
  12246 *
  12247 * NB: only stage 1 includes allocation hints (RW bits), leading to
  12248 * some asymmetry.
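 *
 * For example, combining stage 1 attrs 0xf (write-back, R/W-allocate)
 * with stage 2 attrs 0x8 (write-through) yields (2 << 2) | 3 = 0xb,
 * i.e. write-through with the stage 1 allocation hints preserved.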
  12249 */
  12250static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
  12251{
  12252    if (s1 == 4 || s2 == 4) {
  12253        /* non-cacheable has precedence */
  12254        return 4;
  12255    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
  12256        /* stage 1 write-through takes precedence */
  12257        return s1;
  12258    } else if (extract32(s2, 2, 2) == 2) {
  12259        /* stage 2 write-through takes precedence, but the allocation hint
  12260         * is still taken from stage 1
  12261         */
  12262        return (2 << 2) | extract32(s1, 0, 2);
  12263    } else { /* write-back */
  12264        return s1;
  12265    }
  12266}
  12267
  12268/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
  12269 * and CombineS1S2Desc()
  12270 *
  12271 * @s1:      Attributes from stage 1 walk
  12272 * @s2:      Attributes from stage 2 walk
  12273 */
  12274static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
  12275{
  12276    uint8_t s1lo, s2lo, s1hi, s2hi;
  12277    ARMCacheAttrs ret;
  12278    bool tagged = false;
  12279
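    /*
     * Attrs 0xf0 is Tagged Normal memory (MTE); combine it as Normal
     * write-back (0xff) and restore the Tagged encoding afterwards if
     * the result is still 0xff.
     */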
  12280    if (s1.attrs == 0xf0) {
  12281        tagged = true;
  12282        s1.attrs = 0xff;
  12283    }
  12284
  12285    s1lo = extract32(s1.attrs, 0, 4);
  12286    s2lo = extract32(s2.attrs, 0, 4);
  12287    s1hi = extract32(s1.attrs, 4, 4);
  12288    s2hi = extract32(s2.attrs, 4, 4);
  12289
  12290    /* Combine shareability attributes (table D4-43) */
  12291    if (s1.shareability == 2 || s2.shareability == 2) {
  12292        /* if either are outer-shareable, the result is outer-shareable */
  12293        ret.shareability = 2;
  12294    } else if (s1.shareability == 3 || s2.shareability == 3) {
  12295        /* if either are inner-shareable, the result is inner-shareable */
  12296        ret.shareability = 3;
  12297    } else {
  12298        /* both non-shareable */
  12299        ret.shareability = 0;
  12300    }
  12301
  12302    /* Combine memory type and cacheability attributes */
  12303    if (s1hi == 0 || s2hi == 0) {
  12304        /* Device has precedence over normal */
  12305        if (s1lo == 0 || s2lo == 0) {
  12306            /* nGnRnE has precedence over anything */
  12307            ret.attrs = 0;
  12308        } else if (s1lo == 4 || s2lo == 4) {
  12309            /* non-Reordering has precedence over Reordering */
  12310            ret.attrs = 4;  /* nGnRE */
  12311        } else if (s1lo == 8 || s2lo == 8) {
  12312            /* non-Gathering has precedence over Gathering */
  12313            ret.attrs = 8;  /* nGRE */
  12314        } else {
  12315            ret.attrs = 0xc; /* GRE */
  12316        }
  12317
  12318        /* Any location for which the resultant memory type is any
  12319         * type of Device memory is always treated as Outer Shareable.
  12320         */
  12321        ret.shareability = 2;
  12322    } else { /* Normal memory */
  12323        /* Outer/inner cacheability combine independently */
  12324        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
  12325                  | combine_cacheattr_nibble(s1lo, s2lo);
  12326
  12327        if (ret.attrs == 0x44) {
  12328            /* Any location for which the resultant memory type is Normal
  12329             * Inner Non-cacheable, Outer Non-cacheable is always treated
  12330             * as Outer Shareable.
  12331             */
  12332            ret.shareability = 2;
  12333        }
  12334    }
  12335
  12336    /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
  12337    if (tagged && ret.attrs == 0xff) {
  12338        ret.attrs = 0xf0;
  12339    }
  12340
  12341    return ret;
  12342}
  12343
  12344
  12345/* get_phys_addr - get the physical address for this virtual address
  12346 *
  12347 * Find the physical address corresponding to the given virtual address,
  12348 * by doing a translation table walk on MMU based systems or using the
  12349 * MPU state on MPU based systems.
  12350 *
  12351 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
  12352 * prot and page_size may not be filled in, and the populated fsr value provides
  12353 * information on why the translation aborted, in the format of a
  12354 * DFSR/IFSR fault register, with the following caveats:
  12355 *  * we honour the short vs long DFSR format differences.
  12356 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
  12358 *    value.
  12359 *
  12360 * @env: CPUARMState
  12361 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
  12363 * @mmu_idx: MMU index indicating required translation regime
  12364 * @phys_ptr: set to the physical address corresponding to the virtual address
  12365 * @attrs: set to the memory transaction attributes to use
  12366 * @prot: set to the permissions for the page containing phys_ptr
  12367 * @page_size: set to the size of the page containing phys_ptr
  12368 * @fi: set to fault info if the translation fails
  12369 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
  12370 */
  12371bool get_phys_addr(CPUARMState *env, target_ulong address,
  12372                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
  12373                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
  12374                   target_ulong *page_size,
  12375                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
  12376{
  12377    ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
  12378
  12379    if (mmu_idx != s1_mmu_idx) {
  12380        /* Call ourselves recursively to do the stage 1 and then stage 2
  12381         * translations if mmu_idx is a two-stage regime.
  12382         */
  12383        if (arm_feature(env, ARM_FEATURE_EL2)) {
  12384            hwaddr ipa;
  12385            int s2_prot;
            bool ret;
  12387            ARMCacheAttrs cacheattrs2 = {};
  12388            ARMMMUIdx s2_mmu_idx;
  12389            bool is_el0;
  12390
  12391            ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
  12392                                attrs, prot, page_size, fi, cacheattrs);
  12393
  12394            /* If S1 fails or S2 is disabled, return early.  */
  12395            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
  12396                *phys_ptr = ipa;
  12397                return ret;
  12398            }
  12399
  12400            s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
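            /*
             * Stage 2 decodes execute permission differently for EL0 and
             * EL1 accesses, so tell the walker which one this is.
             */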
  12401            is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
  12402
  12403            /* S1 is done. Now do S2 translation.  */
  12404            ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
  12405                                     phys_ptr, attrs, &s2_prot,
  12406                                     page_size, fi, &cacheattrs2);
  12407            fi->s2addr = ipa;
  12408            /* Combine the S1 and S2 perms.  */
  12409            *prot &= s2_prot;
  12410
  12411            /* If S2 fails, return early.  */
  12412            if (ret) {
  12413                return ret;
  12414            }
  12415
  12416            /* Combine the S1 and S2 cache attributes. */
  12417            if (arm_hcr_el2_eff(env) & HCR_DC) {
  12418                /*
  12419                 * HCR.DC forces the first stage attributes to
  12420                 *  Normal Non-Shareable,
  12421                 *  Inner Write-Back Read-Allocate Write-Allocate,
  12422                 *  Outer Write-Back Read-Allocate Write-Allocate.
  12423                 * Do not overwrite Tagged within attrs.
  12424                 */
  12425                if (cacheattrs->attrs != 0xf0) {
  12426                    cacheattrs->attrs = 0xff;
  12427                }
  12428                cacheattrs->shareability = 0;
  12429            }
  12430            *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
  12431
  12432            /* Check if IPA translates to secure or non-secure PA space. */
  12433            if (arm_is_secure_below_el3(env)) {
  12434                if (attrs->secure) {
  12435                    attrs->secure =
  12436                        !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
  12437                } else {
  12438                    attrs->secure =
  12439                        !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
  12440                        || (env->cp15.vstcr_el2.raw_tcr & VSTCR_SA));
  12441                }
  12442            }
            return false;
  12444        } else {
  12445            /*
  12446             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
  12447             */
  12448            mmu_idx = stage_1_mmu_idx(mmu_idx);
  12449        }
  12450    }
  12451
  12452    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
  12454     * to secure.
  12455     */
  12456    attrs->secure = regime_is_secure(env, mmu_idx);
  12457    attrs->user = regime_is_user(env, mmu_idx);
  12458
  12459    /* Fast Context Switch Extension. This doesn't exist at all in v8.
  12460     * In v7 and earlier it affects all stage 1 translations.
  12461     */
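    /* VAs below 32MB are remapped by adding the FCSE ProcessID
     * (FCSEIDR bits [31:25]).
     */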
  12462    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
  12463        && !arm_feature(env, ARM_FEATURE_V8)) {
  12464        if (regime_el(env, mmu_idx) == 3) {
  12465            address += env->cp15.fcseidr_s;
  12466        } else {
  12467            address += env->cp15.fcseidr_ns;
  12468        }
  12469    }
  12470
  12471    if (arm_feature(env, ARM_FEATURE_PMSA)) {
  12472        bool ret;
  12473        *page_size = TARGET_PAGE_SIZE;
  12474
  12475        if (arm_feature(env, ARM_FEATURE_V8)) {
  12476            /* PMSAv8 */
  12477            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
  12478                                       phys_ptr, attrs, prot, page_size, fi);
  12479        } else if (arm_feature(env, ARM_FEATURE_V7)) {
  12480            /* PMSAv7 */
  12481            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
  12482                                       phys_ptr, prot, page_size, fi);
  12483        } else {
  12484            /* Pre-v7 MPU */
  12485            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
  12486                                       phys_ptr, prot, fi);
  12487        }
  12488        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
  12489                      " mmu_idx %u -> %s (prot %c%c%c)\n",
  12490                      access_type == MMU_DATA_LOAD ? "reading" :
  12491                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
  12492                      (uint32_t)address, mmu_idx,
  12493                      ret ? "Miss" : "Hit",
  12494                      *prot & PAGE_READ ? 'r' : '-',
  12495                      *prot & PAGE_WRITE ? 'w' : '-',
  12496                      *prot & PAGE_EXEC ? 'x' : '-');
  12497
  12498        return ret;
  12499    }
  12500
  12501    /* Definitely a real MMU, not an MPU */
  12502
  12503    if (regime_translation_disabled(env, mmu_idx)) {
  12504        uint64_t hcr;
  12505        uint8_t memattr;
  12506
  12507        /*
  12508         * MMU disabled.  S1 addresses within aa64 translation regimes are
  12509         * still checked for bounds -- see AArch64.TranslateAddressS1Off.
  12510         */
  12511        if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
  12512            int r_el = regime_el(env, mmu_idx);
  12513            if (arm_el_is_aa64(env, r_el)) {
  12514                int pamax = arm_pamax(env_archcpu(env));
  12515                uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
  12516                int addrtop, tbi;
  12517
  12518                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
  12519                if (access_type == MMU_INST_FETCH) {
  12520                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
  12521                }
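                /*
                 * tbi has one bit per half of the address space
                 * (TBI0/TBI1); VA bit 55 selects which applies.
                 */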
  12522                tbi = (tbi >> extract64(address, 55, 1)) & 1;
  12523                addrtop = (tbi ? 55 : 63);
  12524
  12525                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
  12526                    fi->type = ARMFault_AddressSize;
  12527                    fi->level = 0;
  12528                    fi->stage2 = false;
                    return true;
  12530                }
  12531
  12532                /*
  12533                 * When TBI is disabled, we've just validated that all of the
  12534                 * bits above PAMax are zero, so logically we only need to
  12535                 * clear the top byte for TBI.  But it's clearer to follow
  12536                 * the pseudocode set of addrdesc.paddress.
  12537                 */
  12538                address = extract64(address, 0, 52);
  12539            }
  12540        }
  12541        *phys_ptr = address;
  12542        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
  12543        *page_size = TARGET_PAGE_SIZE;
  12544
  12545        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
  12546        hcr = arm_hcr_el2_eff(env);
  12547        cacheattrs->shareability = 0;
  12548        if (hcr & HCR_DC) {
  12549            if (hcr & HCR_DCT) {
  12550                memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
  12551            } else {
  12552                memattr = 0xff;  /* Normal, WB, RWA */
  12553            }
  12554        } else if (access_type == MMU_INST_FETCH) {
  12555            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
  12556                memattr = 0xee;  /* Normal, WT, RA, NT */
  12557            } else {
  12558                memattr = 0x44;  /* Normal, NC, No */
  12559            }
            cacheattrs->shareability = 2; /* outer shareable */
  12561        } else {
  12562            memattr = 0x00;      /* Device, nGnRnE */
  12563        }
  12564        cacheattrs->attrs = memattr;
        return false;
  12566    }
  12567
  12568    if (regime_using_lpae_format(env, mmu_idx)) {
  12569        return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
  12570                                  phys_ptr, attrs, prot, page_size,
  12571                                  fi, cacheattrs);
  12572    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
  12573        return get_phys_addr_v6(env, address, access_type, mmu_idx,
  12574                                phys_ptr, attrs, prot, page_size, fi);
  12575    } else {
  12576        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
  12578    }
  12579}
  12580
  12581hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
  12582                                         MemTxAttrs *attrs)
  12583{
  12584    ARMCPU *cpu = ARM_CPU(cs);
  12585    CPUARMState *env = &cpu->env;
  12586    hwaddr phys_addr;
  12587    target_ulong page_size;
  12588    int prot;
  12589    bool ret;
  12590    ARMMMUFaultInfo fi = {};
  12591    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
  12592    ARMCacheAttrs cacheattrs = {};
  12593
  12594    *attrs = (MemTxAttrs) {};
  12595
  12596    ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
  12597                        attrs, &prot, &page_size, &fi, &cacheattrs);
  12598
  12599    if (ret) {
  12600        return -1;
  12601    }
  12602    return phys_addr;
  12603}
  12604
  12605#endif
  12606
/*
 * Note that signed overflow is undefined in C.  The following routines
 * are careful to use unsigned types where modulo arithmetic is required.
 * Failure to do so _will_ break on newer gcc.
 */
  12610
  12611/* Signed saturating arithmetic.  */
  12612
  12613/* Perform 16-bit signed saturating addition.  */
  12614static inline uint16_t add16_sat(uint16_t a, uint16_t b)
  12615{
  12616    uint16_t res;
  12617
  12618    res = a + b;
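    /*
     * Signed overflow occurred if the operands have the same sign but
     * the result's sign differs; saturate towards the sign of 'a'.
     * The other saturating helpers below use the same pattern.
     */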
  12619    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
  12625    return res;
  12626}
  12627
  12628/* Perform 8-bit signed saturating addition.  */
  12629static inline uint8_t add8_sat(uint8_t a, uint8_t b)
  12630{
  12631    uint8_t res;
  12632
  12633    res = a + b;
  12634    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
  12640    return res;
  12641}
  12642
  12643/* Perform 16-bit signed saturating subtraction.  */
  12644static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
  12645{
  12646    uint16_t res;
  12647
  12648    res = a - b;
  12649    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
  12655    return res;
  12656}
  12657
  12658/* Perform 8-bit signed saturating subtraction.  */
  12659static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
  12660{
  12661    uint8_t res;
  12662
  12663    res = a - b;
  12664    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
  12670    return res;
  12671}
  12672
  12673#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
  12674#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
  12675#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
  12676#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
  12677#define PFX q
  12678
  12679#include "op_addsub.h"
  12680
  12681/* Unsigned saturating arithmetic.  */
  12682static inline uint16_t add16_usat(uint16_t a, uint16_t b)
  12683{
  12684    uint16_t res;
  12685    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
  12688    return res;
  12689}
  12690
  12691static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
  12692{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
  12697}
  12698
  12699static inline uint8_t add8_usat(uint8_t a, uint8_t b)
  12700{
  12701    uint8_t res;
  12702    res = a + b;
    if (res < a) {
        res = 0xff;
    }
  12705    return res;
  12706}
  12707
  12708static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
  12709{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
  12714}
  12715
  12716#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
  12717#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
  12718#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
  12719#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
  12720#define PFX uq
  12721
  12722#include "op_addsub.h"
  12723
  12724/* Signed modulo arithmetic.  */
  12725#define SARITH16(a, b, n, op) do { \
  12726    int32_t sum; \
  12727    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
  12728    RESULT(sum, n, 16); \
    if (sum >= 0) { \
        ge |= 3 << (n * 2); \
    } \
    } while (0)
  12732
  12733#define SARITH8(a, b, n, op) do { \
  12734    int32_t sum; \
  12735    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
  12736    RESULT(sum, n, 8); \
    if (sum >= 0) { \
        ge |= 1 << n; \
    } \
    } while (0)
  12740
  12741
  12742#define ADD16(a, b, n) SARITH16(a, b, n, +)
  12743#define SUB16(a, b, n) SARITH16(a, b, n, -)
  12744#define ADD8(a, b, n)  SARITH8(a, b, n, +)
  12745#define SUB8(a, b, n)  SARITH8(a, b, n, -)
  12746#define PFX s
  12747#define ARITH_GE
  12748
  12749#include "op_addsub.h"
  12750
  12751/* Unsigned modulo arithmetic.  */
  12752#define ADD16(a, b, n) do { \
  12753    uint32_t sum; \
  12754    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
  12755    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) { \
        ge |= 3 << (n * 2); \
    } \
    } while (0)
  12759
  12760#define ADD8(a, b, n) do { \
  12761    uint32_t sum; \
  12762    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
  12763    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) { \
        ge |= 1 << n; \
    } \
    } while (0)
  12767
  12768#define SUB16(a, b, n) do { \
  12769    uint32_t sum; \
  12770    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
  12771    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) { \
        ge |= 3 << (n * 2); \
    } \
    } while (0)
  12775
  12776#define SUB8(a, b, n) do { \
  12777    uint32_t sum; \
  12778    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
  12779    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) { \
        ge |= 1 << n; \
    } \
    } while (0)
  12783
  12784#define PFX u
  12785#define ARITH_GE
  12786
  12787#include "op_addsub.h"
  12788
  12789/* Halved signed arithmetic.  */
  12790#define ADD16(a, b, n) \
  12791  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
  12792#define SUB16(a, b, n) \
  12793  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
  12794#define ADD8(a, b, n) \
  12795  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
  12796#define SUB8(a, b, n) \
  12797  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
  12798#define PFX sh
  12799
  12800#include "op_addsub.h"
  12801
  12802/* Halved unsigned arithmetic.  */
  12803#define ADD16(a, b, n) \
  12804  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
  12805#define SUB16(a, b, n) \
  12806  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
  12807#define ADD8(a, b, n) \
  12808  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
  12809#define SUB8(a, b, n) \
  12810  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
  12811#define PFX uh
  12812
  12813#include "op_addsub.h"
  12814
  12815static inline uint8_t do_usad(uint8_t a, uint8_t b)
  12816{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
  12821}
  12822
  12823/* Unsigned sum of absolute byte differences.  */
  12824uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
  12825{
  12826    uint32_t sum;
  12827    sum = do_usad(a, b);
  12828    sum += do_usad(a >> 8, b >> 8);
  12829    sum += do_usad(a >> 16, b >> 16);
  12830    sum += do_usad(a >> 24, b >> 24);
  12831    return sum;
  12832}
  12833
  12834/* For ARMv6 SEL instruction.  */
  12835uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
  12836{
  12837    uint32_t mask;
  12838
  12839    mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
  12848    return (a & mask) | (b & ~mask);
  12849}
  12850
  12851/* CRC helpers.
  12852 * The upper bytes of val (above the number specified by 'bytes') must have
  12853 * been zeroed out by the caller.
  12854 */
  12855uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
  12856{
  12857    uint8_t buf[4];
  12858
  12859    stl_le_p(buf, val);
  12860
  12861    /* zlib crc32 converts the accumulator and output to one's complement.  */
  12862    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
  12863}
  12864
  12865uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
  12866{
  12867    uint8_t buf[4];
  12868
  12869    stl_le_p(buf, val);
  12870
  12871    /* Linux crc32c converts the output to one's complement.  */
  12872    return crc32c(acc, buf, bytes) ^ 0xffffffff;
  12873}
  12874
  12875/* Return the exception level to which FP-disabled exceptions should
  12876 * be taken, or 0 if FP is enabled.
  12877 */
  12878int fp_exception_el(CPUARMState *env, int cur_el)
  12879{
  12880#ifndef CONFIG_USER_ONLY
  12881    /* CPACR and the CPTR registers don't exist before v6, so FP is
  12882     * always accessible
  12883     */
  12884    if (!arm_feature(env, ARM_FEATURE_V6)) {
  12885        return 0;
  12886    }
  12887
  12888    if (arm_feature(env, ARM_FEATURE_M)) {
  12889        /* CPACR can cause a NOCP UsageFault taken to current security state */
  12890        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
  12891            return 1;
  12892        }
  12893
  12894        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
  12895            if (!extract32(env->v7m.nsacr, 10, 1)) {
  12896                /* FP insns cause a NOCP UsageFault taken to Secure */
  12897                return 3;
  12898            }
  12899        }
  12900
  12901        return 0;
  12902    }
  12903
  12904    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
  12905     * 0, 2 : trap EL0 and EL1/PL1 accesses
  12906     * 1    : trap only EL0 accesses
  12907     * 3    : trap no accesses
  12908     * This register is ignored if E2H+TGE are both set.
  12909     */
  12910    if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
  12911        int fpen = extract32(env->cp15.cpacr_el1, 20, 2);
  12912
  12913        switch (fpen) {
  12914        case 0:
  12915        case 2:
  12916            if (cur_el == 0 || cur_el == 1) {
  12917                /* Trap to PL1, which might be EL1 or EL3 */
  12918                if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
  12919                    return 3;
  12920                }
  12921                return 1;
  12922            }
  12923            if (cur_el == 3 && !is_a64(env)) {
  12924                /* Secure PL1 running at EL3 */
  12925                return 3;
  12926            }
  12927            break;
  12928        case 1:
  12929            if (cur_el == 0) {
  12930                return 1;
  12931            }
  12932            break;
  12933        case 3:
  12934            break;
  12935        }
  12936    }
  12937
  12938    /*
  12939     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
  12940     * to control non-secure access to the FPU. It doesn't have any
  12941     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
  12942     */
  12943    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
  12944         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
  12945        if (!extract32(env->cp15.nsacr, 10, 1)) {
  12946            /* FP insns act as UNDEF */
  12947            return cur_el == 2 ? 2 : 1;
  12948        }
  12949    }
  12950
  12951    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
  12952     * check because zero bits in the registers mean "don't trap".
  12953     */
  12954
  12955    /* CPTR_EL2 : present in v7VE or v8 */
  12956    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
  12957        && arm_is_el2_enabled(env)) {
  12958        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
  12959        return 2;
  12960    }
  12961
  12962    /* CPTR_EL3 : present in v8 */
  12963    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
  12964        /* Trap all FP ops to EL3 */
  12965        return 3;
  12966    }
  12967#endif
  12968    return 0;
  12969}
  12970
  12971/* Return the exception level we're running at if this is our mmu_idx */
  12972int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
  12973{
  12974    if (mmu_idx & ARM_MMU_IDX_M) {
  12975        return mmu_idx & ARM_MMU_IDX_M_PRIV;
  12976    }
  12977
  12978    switch (mmu_idx) {
  12979    case ARMMMUIdx_E10_0:
  12980    case ARMMMUIdx_E20_0:
  12981    case ARMMMUIdx_SE10_0:
  12982    case ARMMMUIdx_SE20_0:
  12983        return 0;
  12984    case ARMMMUIdx_E10_1:
  12985    case ARMMMUIdx_E10_1_PAN:
  12986    case ARMMMUIdx_SE10_1:
  12987    case ARMMMUIdx_SE10_1_PAN:
  12988        return 1;
  12989    case ARMMMUIdx_E2:
  12990    case ARMMMUIdx_E20_2:
  12991    case ARMMMUIdx_E20_2_PAN:
  12992    case ARMMMUIdx_SE2:
  12993    case ARMMMUIdx_SE20_2:
  12994    case ARMMMUIdx_SE20_2_PAN:
  12995        return 2;
  12996    case ARMMMUIdx_SE3:
  12997        return 3;
  12998    default:
  12999        g_assert_not_reached();
  13000    }
  13001}
  13002
  13003#ifndef CONFIG_TCG
  13004ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
  13005{
  13006    g_assert_not_reached();
  13007}
  13008#endif
  13009
  13010ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
  13011{
  13012    ARMMMUIdx idx;
  13013    uint64_t hcr;
  13014
  13015    if (arm_feature(env, ARM_FEATURE_M)) {
  13016        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
  13017    }
  13018
  13019    /* See ARM pseudo-function ELIsInHost.  */
  13020    switch (el) {
  13021    case 0:
  13022        hcr = arm_hcr_el2_eff(env);
  13023        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
  13024            idx = ARMMMUIdx_E20_0;
  13025        } else {
  13026            idx = ARMMMUIdx_E10_0;
  13027        }
  13028        break;
  13029    case 1:
  13030        if (env->pstate & PSTATE_PAN) {
  13031            idx = ARMMMUIdx_E10_1_PAN;
  13032        } else {
  13033            idx = ARMMMUIdx_E10_1;
  13034        }
  13035        break;
  13036    case 2:
  13037        /* Note that TGE does not apply at EL2.  */
  13038        if (arm_hcr_el2_eff(env) & HCR_E2H) {
  13039            if (env->pstate & PSTATE_PAN) {
  13040                idx = ARMMMUIdx_E20_2_PAN;
  13041            } else {
  13042                idx = ARMMMUIdx_E20_2;
  13043            }
  13044        } else {
  13045            idx = ARMMMUIdx_E2;
  13046        }
  13047        break;
  13048    case 3:
  13049        return ARMMMUIdx_SE3;
  13050    default:
  13051        g_assert_not_reached();
  13052    }
  13053
  13054    if (arm_is_secure_below_el3(env)) {
  13055        idx &= ~ARM_MMU_IDX_A_NS;
  13056    }
  13057
  13058    return idx;
  13059}
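
/*
 * Editor's note (usage example, not in the original): at EL0 with
 * HCR_EL2.{E2H,TGE} == {1,1} -- a VHE host's own userspace -- the test
 * above selects ARMMMUIdx_E20_0; with either bit clear EL0 uses
 * ARMMMUIdx_E10_0.  Secure variants then drop the NS bit below.
 */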
  13060
  13061ARMMMUIdx arm_mmu_idx(CPUARMState *env)
  13062{
  13063    return arm_mmu_idx_el(env, arm_current_el(env));
  13064}
  13065
  13066#ifndef CONFIG_USER_ONLY
  13067ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
  13068{
  13069    return stage_1_mmu_idx(arm_mmu_idx(env));
  13070}
  13071#endif
  13072
  13073static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
  13074                                           ARMMMUIdx mmu_idx,
  13075                                           CPUARMTBFlags flags)
  13076{
  13077    DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
  13078    DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
  13079
  13080    if (arm_singlestep_active(env)) {
  13081        DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
  13082    }
  13083    return flags;
  13084}
  13085
  13086static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
  13087                                              ARMMMUIdx mmu_idx,
  13088                                              CPUARMTBFlags flags)
  13089{
  13090    bool sctlr_b = arm_sctlr_b(env);
  13091
  13092    if (sctlr_b) {
  13093        DP_TBFLAG_A32(flags, SCTLR__B, 1);
  13094    }
  13095    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
  13096        DP_TBFLAG_ANY(flags, BE_DATA, 1);
  13097    }
  13098    DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));
  13099
  13100    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
  13101}
  13102
  13103static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
  13104                                        ARMMMUIdx mmu_idx)
  13105{
  13106    CPUARMTBFlags flags = {};
  13107    uint32_t ccr = env->v7m.ccr[env->v7m.secure];
  13108
  13109    /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
  13110    if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
  13111        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
  13112    }
  13113
  13114    if (arm_v7m_is_handler_mode(env)) {
  13115        DP_TBFLAG_M32(flags, HANDLER, 1);
  13116    }
  13117
  13118    /*
  13119     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
  13120     * is suppressing them because the requested execution priority
  13121     * is less than 0.
  13122     */
  13123    if (arm_feature(env, ARM_FEATURE_V8) &&
  13124        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
  13125          (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
  13126        DP_TBFLAG_M32(flags, STACKCHECK, 1);
  13127    }
  13128
  13129    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
  13130}
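
/*
 * Editor's sketch (illustrative restatement, not in the original): the
 * STACKCHECK condition above, pulled out as a predicate.  Stack limit
 * checking is in force on v8M unless we are running at negative
 * priority and CCR.STKOFHFNMIGN asks for it to be ignored there.
 */
static inline bool v8m_stackcheck_sketch(CPUARMState *env,
                                         ARMMMUIdx mmu_idx, uint32_t ccr)
{
    bool suppressed = (mmu_idx & ARM_MMU_IDX_M_NEGPRI)
                      && (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK);

    return arm_feature(env, ARM_FEATURE_V8) && !suppressed;
}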
  13131
  13132static CPUARMTBFlags rebuild_hflags_aprofile(CPUARMState *env)
  13133{
  13134    CPUARMTBFlags flags = {};
  13135
  13136    DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env));
  13137    return flags;
  13138}
  13139
  13140static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
  13141                                        ARMMMUIdx mmu_idx)
  13142{
  13143    CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
  13144    int el = arm_current_el(env);
  13145
  13146    if (arm_sctlr(env, el) & SCTLR_A) {
  13147        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
  13148    }
  13149
  13150    if (arm_el_is_aa64(env, 1)) {
  13151        DP_TBFLAG_A32(flags, VFPEN, 1);
  13152    }
  13153
  13154    if (el < 2 && env->cp15.hstr_el2 &&
  13155        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
  13156        DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
  13157    }
  13158
  13159    if (env->uncached_cpsr & CPSR_IL) {
  13160        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
  13161    }
  13162
  13163    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
  13164}
  13165
  13166static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
  13167                                        ARMMMUIdx mmu_idx)
  13168{
  13169    CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
  13170    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
  13171    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
  13172    uint64_t sctlr;
  13173    int tbii, tbid;
  13174
  13175    DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);
  13176
  13177    /* Get control bits for tagged addresses.  */
  13178    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
  13179    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
  13180
  13181    DP_TBFLAG_A64(flags, TBII, tbii);
  13182    DP_TBFLAG_A64(flags, TBID, tbid);
  13183
  13184    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
  13185        int sve_el = sve_exception_el(env, el);
  13186        uint32_t zcr_len;
  13187
  13188        /*
  13189         * If SVE is disabled, but FP is enabled,
  13190         * then the effective len is 0.
  13191         */
  13192        if (sve_el != 0 && fp_el == 0) {
  13193            zcr_len = 0;
  13194        } else {
  13195            zcr_len = sve_zcr_len_for_el(env, el);
  13196        }
  13197        DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
  13198        DP_TBFLAG_A64(flags, ZCR_LEN, zcr_len);
  13199    }
  13200
  13201    sctlr = regime_sctlr(env, stage1);
  13202
  13203    if (sctlr & SCTLR_A) {
  13204        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
  13205    }
  13206
  13207    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
  13208        DP_TBFLAG_ANY(flags, BE_DATA, 1);
  13209    }
  13210
  13211    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
  13212        /*
  13213         * In order to save space in flags, we record only whether
  13214         * pauth is "inactive", meaning all insns are implemented as
  13215         * a nop, or "active" when some action must be performed.
  13216         * The decision of which action to take is left to a helper.
  13217         */
  13218        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
  13219            DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
  13220        }
  13221    }
  13222
  13223    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
  13224        /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
  13225        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
  13226            DP_TBFLAG_A64(flags, BT, 1);
  13227        }
  13228    }
  13229
  13230    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
  13231    if (!(env->pstate & PSTATE_UAO)) {
  13232        switch (mmu_idx) {
  13233        case ARMMMUIdx_E10_1:
  13234        case ARMMMUIdx_E10_1_PAN:
  13235        case ARMMMUIdx_SE10_1:
  13236        case ARMMMUIdx_SE10_1_PAN:
  13237            /* TODO: ARMv8.3-NV */
  13238            DP_TBFLAG_A64(flags, UNPRIV, 1);
  13239            break;
  13240        case ARMMMUIdx_E20_2:
  13241        case ARMMMUIdx_E20_2_PAN:
  13242        case ARMMMUIdx_SE20_2:
  13243        case ARMMMUIdx_SE20_2_PAN:
  13244            /*
  13245             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
  13246             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
  13247             */
  13248            if (env->cp15.hcr_el2 & HCR_TGE) {
  13249                DP_TBFLAG_A64(flags, UNPRIV, 1);
  13250            }
  13251            break;
  13252        default:
  13253            break;
  13254        }
  13255    }
  13256
  13257    if (env->pstate & PSTATE_IL) {
  13258        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
  13259    }
  13260
  13261    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
  13262        /*
  13263         * Set MTE_ACTIVE if any access may be Checked, and leave clear
  13264         * if all accesses must be Unchecked:
  13265         * 1) If no TBI, then there are no tags in the address to check,
  13266         * 2) If Tag Check Override, then all accesses are Unchecked,
   13267     * 3) If Tag Check Fail == 0, then Checked accesses have no effect,
  13268         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
  13269         */
  13270        if (allocation_tag_access_enabled(env, el, sctlr)) {
  13271            DP_TBFLAG_A64(flags, ATA, 1);
  13272            if (tbid
  13273                && !(env->pstate & PSTATE_TCO)
  13274                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
  13275                DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
  13276            }
  13277        }
  13278        /* And again for unprivileged accesses, if required.  */
  13279        if (EX_TBFLAG_A64(flags, UNPRIV)
  13280            && tbid
  13281            && !(env->pstate & PSTATE_TCO)
  13282            && (sctlr & SCTLR_TCF0)
  13283            && allocation_tag_access_enabled(env, 0, sctlr)) {
  13284            DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
  13285        }
  13286        /* Cache TCMA as well as TBI. */
  13287        DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
  13288    }
  13289
  13290    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
  13291}
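
/*
 * Editor's sketch (not in the original): the four-part MTE_ACTIVE
 * condition above, restated as one predicate.  The numbered comments
 * refer back to the Unchecked cases listed in rebuild_hflags_a64().
 */
static inline bool mte_active_sketch(CPUARMState *env, int el,
                                     uint64_t sctlr, int tbid)
{
    return allocation_tag_access_enabled(env, el, sctlr)      /* case 4 */
        && tbid                                               /* case 1 */
        && !(env->pstate & PSTATE_TCO)                        /* case 2 */
        && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF));      /* case 3 */
}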
  13292
  13293static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
  13294{
  13295    int el = arm_current_el(env);
  13296    int fp_el = fp_exception_el(env, el);
  13297    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
  13298
  13299    if (is_a64(env)) {
  13300        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
  13301    } else if (arm_feature(env, ARM_FEATURE_M)) {
  13302        return rebuild_hflags_m32(env, fp_el, mmu_idx);
  13303    } else {
  13304        return rebuild_hflags_a32(env, fp_el, mmu_idx);
  13305    }
  13306}
  13307
  13308void arm_rebuild_hflags(CPUARMState *env)
  13309{
  13310    env->hflags = rebuild_hflags_internal(env);
  13311}
  13312
   13313/*
   13314 * If we have triggered an EL state change we can't rely on the
   13315 * translator having passed it to us, so we must recompute.
   13316 */
  13317void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
  13318{
  13319    int el = arm_current_el(env);
  13320    int fp_el = fp_exception_el(env, el);
  13321    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
  13322
  13323    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
  13324}
  13325
  13326void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
  13327{
  13328    int fp_el = fp_exception_el(env, el);
  13329    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
  13330
  13331    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
  13332}
  13333
   13334/*
   13335 * If we have triggered an EL state change we can't rely on the
   13336 * translator having passed it to us, so we must recompute.
   13337 */
  13338void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
  13339{
  13340    int el = arm_current_el(env);
  13341    int fp_el = fp_exception_el(env, el);
  13342    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
  13343    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
  13344}
  13345
  13346void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
  13347{
  13348    int fp_el = fp_exception_el(env, el);
  13349    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
  13350
  13351    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
  13352}
  13353
  13354void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
  13355{
  13356    int fp_el = fp_exception_el(env, el);
  13357    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
  13358
  13359    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
  13360}
  13361
  13362static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
  13363{
  13364#ifdef CONFIG_DEBUG_TCG
  13365    CPUARMTBFlags c = env->hflags;
  13366    CPUARMTBFlags r = rebuild_hflags_internal(env);
  13367
  13368    if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
  13369        fprintf(stderr, "TCG hflags mismatch "
  13370                        "(current:(0x%08x,0x" TARGET_FMT_lx ")"
   13371                        " rebuilt:(0x%08x,0x" TARGET_FMT_lx "))\n",
  13372                c.flags, c.flags2, r.flags, r.flags2);
  13373        abort();
  13374    }
  13375#endif
  13376}
  13377
  13378static bool mve_no_pred(CPUARMState *env)
  13379{
  13380    /*
  13381     * Return true if there is definitely no predication of MVE
  13382     * instructions by VPR or LTPSIZE. (Returning false even if there
  13383     * isn't any predication is OK; generated code will just be
  13384     * a little worse.)
  13385     * If the CPU does not implement MVE then this TB flag is always 0.
  13386     *
  13387     * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
  13388     * logic in gen_update_fp_context() needs to be updated to match.
  13389     *
  13390     * We do not include the effect of the ECI bits here -- they are
  13391     * tracked in other TB flags. This simplifies the logic for
  13392     * "when did we emit code that changes the MVE_NO_PRED TB flag
  13393     * and thus need to end the TB?".
  13394     */
  13395    if (cpu_isar_feature(aa32_mve, env_archcpu(env))) {
  13396        return false;
  13397    }
  13398    if (env->v7m.vpr) {
  13399        return false;
  13400    }
  13401    if (env->v7m.ltpsize < 4) {
  13402        return false;
  13403    }
  13404    return true;
  13405}
  13406
  13407void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
  13408                          target_ulong *cs_base, uint32_t *pflags)
  13409{
  13410    CPUARMTBFlags flags;
  13411
  13412    assert_hflags_rebuild_correctly(env);
  13413    flags = env->hflags;
  13414
  13415    if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
  13416        *pc = env->pc;
  13417        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
  13418            DP_TBFLAG_A64(flags, BTYPE, env->btype);
  13419        }
  13420    } else {
  13421        *pc = env->regs[15];
  13422
  13423        if (arm_feature(env, ARM_FEATURE_M)) {
  13424            if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
  13425                FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
  13426                != env->v7m.secure) {
  13427                DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
  13428            }
  13429
  13430            if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
  13431                (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
  13432                 (env->v7m.secure &&
  13433                  !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
  13434                /*
  13435                 * ASPEN is set, but FPCA/SFPA indicate that there is no
  13436                 * active FP context; we must create a new FP context before
  13437                 * executing any FP insn.
  13438                 */
  13439                DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
  13440            }
  13441
  13442            bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
  13443            if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
  13444                DP_TBFLAG_M32(flags, LSPACT, 1);
  13445            }
  13446
  13447            if (mve_no_pred(env)) {
  13448                DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
  13449            }
  13450        } else {
  13451            /*
  13452             * Note that XSCALE_CPAR shares bits with VECSTRIDE.
  13453             * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
  13454             */
  13455            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
  13456                DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
  13457            } else {
  13458                DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
  13459                DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
  13460            }
  13461            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
  13462                DP_TBFLAG_A32(flags, VFPEN, 1);
  13463            }
  13464        }
  13465
  13466        DP_TBFLAG_AM32(flags, THUMB, env->thumb);
  13467        DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
  13468    }
  13469
  13470    /*
  13471     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
  13472     * states defined in the ARM ARM for software singlestep:
  13473     *  SS_ACTIVE   PSTATE.SS   State
  13474     *     0            x       Inactive (the TB flag for SS is always 0)
  13475     *     1            0       Active-pending
  13476     *     1            1       Active-not-pending
  13477     * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
  13478     */
  13479    if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
  13480        DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
  13481    }
  13482
  13483    *pflags = flags.flags;
  13484    *cs_base = flags.flags2;
  13485}
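
/*
 * Editor's sketch (not in the original): the singlestep state machine
 * documented above, decoded explicitly.  The enum and function names
 * here are hypothetical.
 */
typedef enum SStepStateSketch {
    SSTEP_INACTIVE,           /* SS_ACTIVE == 0 */
    SSTEP_ACTIVE_PENDING,     /* SS_ACTIVE == 1, PSTATE.SS == 0 */
    SSTEP_ACTIVE_NOT_PENDING, /* SS_ACTIVE == 1, PSTATE.SS == 1 */
} SStepStateSketch;

static inline SStepStateSketch sstep_state_sketch(CPUARMTBFlags flags)
{
    if (!EX_TBFLAG_ANY(flags, SS_ACTIVE)) {
        return SSTEP_INACTIVE;
    }
    return EX_TBFLAG_ANY(flags, PSTATE__SS) ? SSTEP_ACTIVE_NOT_PENDING
                                            : SSTEP_ACTIVE_PENDING;
}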
  13486
  13487#ifdef TARGET_AARCH64
  13488/*
  13489 * The manual says that when SVE is enabled and VQ is widened the
  13490 * implementation is allowed to zero the previously inaccessible
  13491 * portion of the registers.  The corollary to that is that when
  13492 * SVE is enabled and VQ is narrowed we are also allowed to zero
  13493 * the now inaccessible portion of the registers.
  13494 *
   13495 * The intent of this is that no predicate bit beyond VQ is ever set,
   13496 * which means that some operations on the predicate registers
   13497 * themselves may operate on a full uint64_t, or even unrolled across
   13498 * the maximum uint64_t[4].  Performing four uint64_t's worth of host
   13499 * arithmetic unconditionally may well be cheaper than conditionals
   13500 * restricting the operation to the relevant portion of a uint16_t[16].
  13501 */
  13502void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
  13503{
  13504    int i, j;
  13505    uint64_t pmask;
  13506
  13507    assert(vq >= 1 && vq <= ARM_MAX_VQ);
  13508    assert(vq <= env_archcpu(env)->sve_max_vq);
  13509
  13510    /* Zap the high bits of the zregs.  */
  13511    for (i = 0; i < 32; i++) {
  13512        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
  13513    }
  13514
  13515    /* Zap the high bits of the pregs and ffr.  */
  13516    pmask = 0;
  13517    if (vq & 3) {
  13518        pmask = ~(-1ULL << (16 * (vq & 3)));
  13519    }
  13520    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
  13521        for (i = 0; i < 17; ++i) {
  13522            env->vfp.pregs[i].p[j] &= pmask;
  13523        }
  13524        pmask = 0;
  13525    }
  13526}
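
/*
 * Editor's note (worked example, not in the original): each uint64_t in
 * pregs[].p covers four quadwords of predicate bits (16 bits per VQ).
 * For vq == 6: vq / 4 == 1 and vq & 3 == 2, so pmask starts out as
 * ~(-1ULL << 32) == 0x00000000ffffffff.  The loop then leaves p[0]
 * alone, keeps only the low 32 bits of p[1], and clears p[2] and p[3]
 * (pmask having been reset to 0 after the first iteration).
 */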
  13527
  13528/*
  13529 * Notice a change in SVE vector size when changing EL.
  13530 */
  13531void aarch64_sve_change_el(CPUARMState *env, int old_el,
  13532                           int new_el, bool el0_a64)
  13533{
  13534    ARMCPU *cpu = env_archcpu(env);
  13535    int old_len, new_len;
  13536    bool old_a64, new_a64;
  13537
  13538    /* Nothing to do if no SVE.  */
  13539    if (!cpu_isar_feature(aa64_sve, cpu)) {
  13540        return;
  13541    }
  13542
  13543    /* Nothing to do if FP is disabled in either EL.  */
  13544    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
  13545        return;
  13546    }
  13547
  13548    /*
  13549     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
  13550     * at ELx, or not available because the EL is in AArch32 state, then
  13551     * for all purposes other than a direct read, the ZCR_ELx.LEN field
  13552     * has an effective value of 0".
  13553     *
  13554     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
  13555     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
  13556     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
  13557     * we already have the correct register contents when encountering the
  13558     * vq0->vq0 transition between EL0->EL1.
  13559     */
  13560    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
  13561    old_len = (old_a64 && !sve_exception_el(env, old_el)
  13562               ? sve_zcr_len_for_el(env, old_el) : 0);
  13563    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
  13564    new_len = (new_a64 && !sve_exception_el(env, new_el)
  13565               ? sve_zcr_len_for_el(env, new_el) : 0);
  13566
  13567    /* When changing vector length, clear inaccessible state.  */
  13568    if (new_len < old_len) {
  13569        aarch64_sve_narrow_vq(env, new_len + 1);
  13570    }
  13571}
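
/*
 * Editor's note (worked example, not in the original; assumes ZCR_EL2
 * grants vq == 4): for the EL2 (aa64, vq=4) -> EL0 (aa32) step in the
 * comment above, old_len == 3 (vq - 1) while new_len == 0 because
 * new_a64 is false, so we call aarch64_sve_narrow_vq(env, 1) and only
 * the low 128 bits of each zreg survive into the aa32 regime.
 */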
  13572#endif