cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

m_helper.c (106303B)


/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "arm_ldst.h"
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"
#endif

static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask,
                         uint32_t reg, uint32_t val)
{
    /* Only APSR is actually writable */
    if (!(reg & 4)) {
        uint32_t apsrmask = 0;

        if (mask & 8) {
            apsrmask |= XPSR_NZCV | XPSR_Q;
        }
        if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            apsrmask |= XPSR_GE;
        }
        xpsr_write(env, val, apsrmask);
    }
}

static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
{
    uint32_t mask = 0;

    if ((reg & 1) && el) {
        mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
    }
    if (!(reg & 4)) {
        mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            mask |= XPSR_GE;
        }
    }
    /* EPSR reads as zero */
    return xpsr_read(env) & mask;
}

static uint32_t v7m_mrs_control(CPUARMState *env, uint32_t secure)
{
    uint32_t value = env->v7m.control[secure];

    if (!secure) {
        /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
        value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
    }
    return value;
}
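
/*
 * For reference: in the SYSm encoding used for the 'reg' argument
 * above, bit 0 selects the IPSR component, a clear bit 2 selects the
 * APSR component, the EPSR component always reads as zero, and
 * reg == 20 is CONTROL. For example, "MRS r0, CONTROL" reaches
 * v7m_mrs_control() via HELPER(v7m_mrs) with reg == 20.
 */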

#ifdef CONFIG_USER_ONLY

void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 20: /* CONTROL */
        /* There are no sub-fields that are actually writable from EL0. */
        break;
    default:
        /* Unprivileged writes to other registers are ignored */
        break;
    }
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        return v7m_mrs_xpsr(env, reg, 0);
    case 20: /* CONTROL */
        return v7m_mrs_control(env, 0);
    default:
        /* Unprivileged reads of other registers return zero. */
        return 0;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /*
     * The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

#else

/*
 * What kind of stack write are we doing? This affects how exceptions
 * generated during the stacking are treated.
 */
typedef enum StackingMode {
    STACK_NORMAL,
    STACK_IGNFAULTS,
    STACK_LAZYFP,
} StackingMode;
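
/*
 * Illustrative summary of how v7m_stack_write() below reports a fault
 * in each mode: STACK_NORMAL pends a derived exception and sets the
 * MSTKERR/STKERR-style status bits; STACK_LAZYFP pends via the lazy-FP
 * path and sets MLSPERR/LSPERR instead; STACK_IGNFAULTS still updates
 * the fault status registers but pends nothing.
 */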

static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, StackingMode mode)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.LSPERR "
                              "during lazy stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.AUVIOL "
                              "during stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
            }
            env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MLSPERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MSTKERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            }
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        if (mode == STACK_LAZYFP) {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        }
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception (this is indicated
     * by the caller passing STACK_IGNFAULTS). Even in this case we will
     * still update the fault status registers.
     */
    switch (mode) {
    case STACK_NORMAL:
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
        break;
    case STACK_LAZYFP:
        armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
        break;
    case STACK_IGNFAULTS:
        break;
    }
    return false;
}
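
/*
 * Typical use, as in v7m_push_callee_stack() below: write one word of
 * a frame and fold the result into a running success flag, e.g.
 *
 *   stacked_ok = stacked_ok &&
 *       v7m_stack_write(cpu, frameptr + 0x8, env->regs[4],
 *                       mmu_idx, STACK_NORMAL);
 */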

static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
                              attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}
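
/*
 * The unstacking counterpart is used the same way, e.g. (illustrative):
 *
 *   pop_ok = pop_ok &&
 *       v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx);
 *
 * On failure the fault has already been pended and *dest is left
 * unmodified.
 */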

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /*
     * Preserve FP state (because LSPACT was set and we are about
     * to execute an FP instruction). This corresponds to the
     * PreserveFPState() pseudocode.
     * We may throw an exception if the stacking fails.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
    bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
    bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
    uint32_t fpcar = env->v7m.fpcar[is_secure];
    bool stacked_ok = true;
    bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
    bool take_exception;

    /* Take the iothread lock as we are going to touch the NVIC */
    qemu_mutex_lock_iothread();

    /* Check the background context had access to the FPU */
    if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
        env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
        env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    }

    if (!splimviol && stacked_ok) {
        /* We only stack if the stack limit wasn't violated */
        int i;
        ARMMMUIdx mmu_idx;

        mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fpcar + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR/VPR */
            }
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
                v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
        }

        stacked_ok = stacked_ok &&
            v7m_stack_write(cpu, fpcar + 0x40,
                            vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, fpcar + 0x44,
                                env->v7m.vpr, mmu_idx, STACK_LAZYFP);
        }
    }

    /*
     * We definitely pended an exception, but it's possible that it
     * might not be able to be taken now. If its priority permits us
     * to take it now, then we must not update the LSPACT or FP regs,
     * but instead jump out to take the exception immediately.
     * If it's just pending and won't be taken until the current
     * handler exits, then we do update LSPACT and the FP regs.
     */
    take_exception = !stacked_ok &&
        armv7m_nvic_can_take_pending_exception(env->nvic);

    qemu_mutex_unlock_iothread();

    if (take_exception) {
        raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
    }

    env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;

    if (ts) {
        /* Clear s0 to s31 and the FPSCR and VPR */
        int i;

        for (i = 0; i < 32; i += 2) {
            *aa32_vfp_dreg(env, i / 2) = 0;
        }
        vfp_set_fpscr(env, 0);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            env->v7m.vpr = 0;
        }
    }
    /*
     * Otherwise s0 to s15, FPSCR and VPR are UNKNOWN; we choose to leave them
     * unchanged.
     */
}
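
/*
 * For reference, the lazy FP save area written above is laid out
 * relative to FPCAR (faddr = fpcar + 4 * i, plus 8 once i >= 16):
 *   +0x00..+0x3c  s0-s15
 *   +0x40         FPSCR
 *   +0x44         VPR (if MVE is implemented)
 *   +0x48..+0x84  s16-s31 (only if FPCCR.TS is set)
 */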

/*
 * Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/*
 * Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /*
     * Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}

/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /*
     * All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}
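
/*
 * In total there are four banked stack pointers (MSP_S, PSP_S, MSP_NS,
 * PSP_NS): the selected one lives in regs[13], the other SP of the
 * current security state in v7m.other_sp, and the two SPs of the
 * opposite state in v7m.other_ss_msp/other_ss_psp. The function above
 * rotates values between those slots when the security state flips.
 */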

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /*
         * This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (!(dest & 1)) {
        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    }
    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
    arm_rebuild_hflags(env);
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /*
         * Target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data_ra(env, sp, nextinst, GETPC());
    cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /*
         * Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
    arm_rebuild_hflags(env);
}

static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /*
     * Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}
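
/*
 * Example call, as used for the tailchaining case in
 * v7m_push_callee_stack() below, where the SPSEL value comes from the
 * EXC_RETURN payload rather than from CONTROL:
 *
 *   frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
 *                               lr & R_V7M_EXCRET_SPSEL_MASK);
 */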

static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /*
     * We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /*
             * NS access to S memory: the underlying exception which we escalate
             * to HardFault is SecureFault, which always targets Secure.
             */
            exc_secure = true;
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        /*
         * Underlying exception is BusFault: its target security state
         * depends on BFHFNMINS.
         */
        exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
        goto load_fail;
    }
    *pvec = vector_entry;
    return true;

load_fail:
    /*
     * All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a SecureFault or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
     * secure); otherwise it targets the same security state as the
     * underlying exception.
     * In v8.1M HardFaults from vector table fetch fails don't set FORCED.
     */
    if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
        exc_secure = true;
    }
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK;
    if (!arm_feature(env, ARM_FEATURE_V8_1M)) {
        env->v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}

static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
{
    /*
     * Return the integrity signature value for the callee-saves
     * stack frame section. @lr is the exception return payload/LR value
     * whose FType bit forms bit 0 of the signature if FP is present.
     */
    uint32_t sig = 0xfefa125a;

    if (!cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))
        || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
        sig |= 1;
    }
    return sig;
}
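
/*
 * Concretely: the signature is 0xfefa125a when an FP context was saved
 * (the CPU has an FPU and FType == 0) and 0xfefa125b otherwise.
 */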

static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /*
     * For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;
    uint32_t sig;
    StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /*
     * Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    sig = v7m_integrity_sig(env, lr);
    stacked_ok =
        v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}
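
/*
 * For reference, the 0x28-byte callee-saves frame written above:
 *   +0x00         integrity signature
 *   +0x04         reserved (not written)
 *   +0x08..+0x24  r4-r11
 */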

static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /*
     * Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (dotailchain) {
        /* Sanitize LR FType and PREFIX bits */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            lr |= R_V7M_EXCRET_FTYPE_MASK;
        }
        lr = deposit32(lr, 24, 8, 0xff);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /*
             * The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /*
                     * We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /*
                 * We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /*
         * Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values. v8.1M is tighter than v8.0M
         * here and always zeroes the caller-saved registers regardless
         * of the security state the exception is targeting.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure || arm_feature(env, ARM_FEATURE_V8_1M)) {
                /*
                 * Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;
                /*
                 * r4..r11 are callee-saves, zero only if background
                 * state was Secure (EXCRET.S == 1) and exception
                 * targets Non-secure state
                 */
                bool zero_callee_saves = !targets_secure &&
                    (lr & R_V7M_EXCRET_S_MASK);

                for (i = 0; i < 13; i++) {
                    if (i < 4 || i > 11 || zero_callee_saves) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /*
         * Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on vector table load\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /*
     * Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear SFPA and FPCA (has no effect if no FPU) */
    env->v7m.control[M_REG_S] &=
        ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
    arm_rebuild_hflags(env);
}

static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
                             bool apply_splim)
{
    /*
     * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
     * that we will need later in order to do lazy FP reg stacking.
     */
    bool is_secure = env->v7m.secure;
    void *nvic = env->nvic;
    /*
     * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
     * are banked and we want to update the bit in the bank for the
     * current security state; and in one case we want to specifically
     * update the NS banked version of a bit even if we are secure.
     */
    uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
    uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
    uint32_t *fpccr = &env->v7m.fpccr[is_secure];
    bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;

    env->v7m.fpcar[is_secure] = frameptr & ~0x7;

    if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
        bool splimviol;
        uint32_t splim = v7m_sp_limit(env);
        bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
            (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);

        splimviol = !ign && frameptr < splim;
        *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
    }

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);

    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
                        !arm_v7m_is_handler_mode(env));

    hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);

    bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);

    mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);

    ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
    *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);

    monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);

        sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
    }
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* fptr is the value of Rn, the frame pointer we store the FP regs to */
    ARMCPU *cpu = env_archcpu(env);
    bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
    uintptr_t ra = GETPC();

    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (lspact) {
        /* LSPACT should not be active when there is active FP state */
        raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
    }

    if (fptr & 7) {
        raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
    }

    /*
     * Note that we do not use v7m_stack_write() here, because the
     * accesses should not set the FSR bits for stacking errors if they
     * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
     * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
     * and longjmp out.
     */
    if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fptr + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR and VPR */
            }
            cpu_stl_data_ra(env, faddr, slo, ra);
            cpu_stl_data_ra(env, faddr + 4, shi, ra);
        }
        cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            cpu_stl_data_ra(env, fptr + 0x44, env->v7m.vpr, ra);
        }

        /*
         * If TS is 0 then s0 to s15, FPSCR and VPR are UNKNOWN; we choose to
         * leave them unchanged, matching our choice in v7m_preserve_fp_state.
         */
        if (ts) {
            for (i = 0; i < 32; i += 2) {
                *aa32_vfp_dreg(env, i / 2) = 0;
            }
            vfp_set_fpscr(env, 0);
            if (cpu_isar_feature(aa32_mve, cpu)) {
                env->v7m.vpr = 0;
            }
        }
    } else {
        v7m_update_fpccr(env, fptr, false);
    }

    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    ARMCPU *cpu = env_archcpu(env);
    uintptr_t ra = GETPC();

    /* fptr is the value of Rn, the frame pointer we load the FP regs from */
    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
        /* State in FP is still valid */
        env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
    } else {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;
        uint32_t fpscr;

        if (fptr & 7) {
            raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
        }

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint32_t slo, shi;
            uint64_t dn;
            uint32_t faddr = fptr + 4 * i;

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR and VPR */
            }

            slo = cpu_ldl_data_ra(env, faddr, ra);
            shi = cpu_ldl_data_ra(env, faddr + 4, ra);

            dn = (uint64_t) shi << 32 | slo;
            *aa32_vfp_dreg(env, i / 2) = dn;
        }
        fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
        vfp_set_fpscr(env, fpscr);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            env->v7m.vpr = cpu_ldl_data_ra(env, fptr + 0x44, ra);
        }
    }

    env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
}
   1181
   1182static bool v7m_push_stack(ARMCPU *cpu)
   1183{
   1184    /*
   1185     * Do the "set up stack frame" part of exception entry,
   1186     * similar to pseudocode PushStack().
   1187     * Return true if we generate a derived exception (and so
   1188     * should ignore further stack faults trying to process
   1189     * that derived exception.)
   1190     */
   1191    bool stacked_ok = true, limitviol = false;
   1192    CPUARMState *env = &cpu->env;
   1193    uint32_t xpsr = xpsr_read(env);
   1194    uint32_t frameptr = env->regs[13];
   1195    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
   1196    uint32_t framesize;
   1197    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);
   1198
   1199    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
   1200        (env->v7m.secure || nsacr_cp10)) {
   1201        if (env->v7m.secure &&
   1202            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
   1203            framesize = 0xa8;
   1204        } else {
   1205            framesize = 0x68;
   1206        }
   1207    } else {
   1208        framesize = 0x20;
   1209    }
   1210
   1211    /* Align stack pointer if the guest wants that */
   1212    if ((frameptr & 4) &&
   1213        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
   1214        frameptr -= 4;
   1215        xpsr |= XPSR_SPREALIGN;
   1216    }
   1217
   1218    xpsr &= ~XPSR_SFPA;
   1219    if (env->v7m.secure &&
   1220        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
   1221        xpsr |= XPSR_SFPA;
   1222    }
   1223
   1224    frameptr -= framesize;
   1225
   1226    if (arm_feature(env, ARM_FEATURE_V8)) {
   1227        uint32_t limit = v7m_sp_limit(env);
   1228
   1229        if (frameptr < limit) {
   1230            /*
   1231             * Stack limit failure: set SP to the limit value, and generate
   1232             * STKOF UsageFault. Stack pushes below the limit must not be
   1233             * performed. It is IMPDEF whether pushes above the limit are
   1234             * performed; we choose not to.
   1235             */
   1236            qemu_log_mask(CPU_LOG_INT,
   1237                          "...STKOF during stacking\n");
   1238            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
   1239            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
   1240                                    env->v7m.secure);
   1241            env->regs[13] = limit;
   1242            /*
   1243             * We won't try to perform any further memory accesses but
   1244             * we must continue through the following code to check for
   1245             * permission faults during FPU state preservation, and we
   1246             * must update FPCCR if lazy stacking is enabled.
   1247             */
   1248            limitviol = true;
   1249            stacked_ok = false;
   1250        }
   1251    }
   1252
   1253    /*
   1254     * Write as much of the stack frame as we can. If we fail a stack
   1255     * write this will result in a derived exception being pended
   1256     * (which may be taken in preference to the one we started with
   1257     * if it has higher priority).
   1258     */
   1259    stacked_ok = stacked_ok &&
   1260        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
   1261        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
   1262                        mmu_idx, STACK_NORMAL) &&
   1263        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
   1264                        mmu_idx, STACK_NORMAL) &&
   1265        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
   1266                        mmu_idx, STACK_NORMAL) &&
   1267        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
   1268                        mmu_idx, STACK_NORMAL) &&
   1269        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
   1270                        mmu_idx, STACK_NORMAL) &&
   1271        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
   1272                        mmu_idx, STACK_NORMAL) &&
   1273        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);
   1274
   1275    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
   1276        /* FPU is active, try to save its registers */
   1277        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
   1278        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;
   1279
   1280        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
   1281            qemu_log_mask(CPU_LOG_INT,
   1282                          "...SecureFault because LSPACT and FPCA both set\n");
   1283            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
   1284            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   1285        } else if (!env->v7m.secure && !nsacr_cp10) {
   1286            qemu_log_mask(CPU_LOG_INT,
   1287                          "...Secure UsageFault with CFSR.NOCP because "
   1288                          "NSACR.CP10 prevents stacking FP regs\n");
   1289            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
   1290            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
   1291        } else {
   1292            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
   1293                /* Lazy stacking disabled, save registers now */
   1294                int i;
   1295                bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
   1296                                                 arm_current_el(env) != 0);
   1297
   1298                if (stacked_ok && !cpacr_pass) {
   1299                    /*
   1300                     * Take UsageFault if CPACR forbids access. The pseudocode
   1301                     * here does a full CheckCPEnabled() but we know the NSACR
   1302                     * check can never fail as we have already handled that.
   1303                     */
   1304                    qemu_log_mask(CPU_LOG_INT,
   1305                                  "...UsageFault with CFSR.NOCP because "
   1306                                  "CPACR.CP10 prevents stacking FP regs\n");
   1307                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
   1308                                            env->v7m.secure);
   1309                    env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
   1310                    stacked_ok = false;
   1311                }
   1312
   1313                for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
   1314                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
   1315                    uint32_t faddr = frameptr + 0x20 + 4 * i;
   1316                    uint32_t slo = extract64(dn, 0, 32);
   1317                    uint32_t shi = extract64(dn, 32, 32);
   1318
   1319                    if (i >= 16) {
   1320                        faddr += 8; /* skip the slot for the FPSCR and VPR */
   1321                    }
   1322                    stacked_ok = stacked_ok &&
   1323                        v7m_stack_write(cpu, faddr, slo,
   1324                                        mmu_idx, STACK_NORMAL) &&
   1325                        v7m_stack_write(cpu, faddr + 4, shi,
   1326                                        mmu_idx, STACK_NORMAL);
   1327                }
   1328                stacked_ok = stacked_ok &&
   1329                    v7m_stack_write(cpu, frameptr + 0x60,
   1330                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
   1331                if (cpu_isar_feature(aa32_mve, cpu)) {
   1332                    stacked_ok = stacked_ok &&
   1333                        v7m_stack_write(cpu, frameptr + 0x64,
   1334                                        env->v7m.vpr, mmu_idx, STACK_NORMAL);
   1335                }
   1336                if (cpacr_pass) {
   1337                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
   1338                        *aa32_vfp_dreg(env, i / 2) = 0;
   1339                    }
   1340                    vfp_set_fpscr(env, 0);
   1341                    if (cpu_isar_feature(aa32_mve, cpu)) {
   1342                        env->v7m.vpr = 0;
   1343                    }
   1344                }
   1345            } else {
   1346                /* Lazy stacking enabled, save necessary info to stack later */
   1347                v7m_update_fpccr(env, frameptr + 0x20, true);
   1348            }
   1349        }
   1350    }
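
    /*
     * For reference, the completed exception frame layout (offsets from
     * frameptr, matching the v7m_stack_write() calls above):
     *   0x00..0x1c  r0-r3, r12, lr, pc, xPSR  (basic 0x20-byte frame)
     *   0x20..0x5c  s0-s15                    (FP active, lazy stacking off)
     *   0x60        FPSCR
     *   0x64        VPR                       (only if MVE is implemented)
     *   0x68..0xa4  s16-s31                   (only when framesize == 0xa8)
     */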
   1351
   1352    /*
   1353     * If we broke a stack limit then SP was already updated earlier;
   1354     * otherwise we update SP regardless of whether any of the stack
   1355     * accesses failed or we took some other kind of fault.
   1356     */
   1357    if (!limitviol) {
   1358        env->regs[13] = frameptr;
   1359    }
   1360
   1361    return !stacked_ok;
   1362}
   1363
   1364static void do_v7m_exception_exit(ARMCPU *cpu)
   1365{
   1366    CPUARMState *env = &cpu->env;
   1367    uint32_t excret;
   1368    uint32_t xpsr, xpsr_mask;
   1369    bool ufault = false;
   1370    bool sfault = false;
   1371    bool return_to_sp_process;
   1372    bool return_to_handler;
   1373    bool rettobase = false;
   1374    bool exc_secure = false;
   1375    bool return_to_secure;
   1376    bool ftype;
   1377    bool restore_s16_s31 = false;
   1378
   1379    /*
   1380     * If we're not in Handler mode then jumps to magic exception-exit
   1381     * addresses don't have magic behaviour. However for the v8M
   1382     * security extensions the magic secure-function-return has to
   1383     * work in thread mode too, so to avoid doing an extra check in
   1384     * the generated code we allow exception-exit magic to also cause the
   1385     * internal exception and bring us here in thread mode. Correct code
   1386     * will never try to do this (the following insn fetch will always
   1387     * fault) so the overhead of having taken an unnecessary exception
   1388     * doesn't matter.
   1389     */
   1390    if (!arm_v7m_is_handler_mode(env)) {
   1391        return;
   1392    }
   1393
   1394    /*
   1395     * In the spec pseudocode ExceptionReturn() is called directly
   1396     * from BXWritePC() and gets the full target PC value including
   1397     * bit zero. In QEMU's implementation we treat it as a normal
   1398     * jump-to-register (which is then caught later on), and so split
   1399     * the target value up between env->regs[15] and env->thumb in
   1400     * gen_bx(). Reconstitute it.
   1401     */
   1402    excret = env->regs[15];
   1403    if (env->thumb) {
   1404        excret |= 1;
   1405    }
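
    /*
     * For reference, the EXC_RETURN bit assignments checked below (the
     * R_V7M_EXCRET_* fields from cpu.h): ES is bit 0, SPSEL bit 2, MODE
     * bit 3, FTYPE bit 4, DCRS bit 5 and S bit 6; bit 1 is RES0 and
     * bits [31:7] are RES1.
     */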
   1406
   1407    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
   1408                  " previous exception %d\n",
   1409                  excret, env->v7m.exception);
   1410
   1411    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
   1412        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
   1413                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
   1414                      excret);
   1415    }
   1416
   1417    ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
   1418
   1419    if (!ftype && !cpu_isar_feature(aa32_vfp_simd, cpu)) {
   1420        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
   1421                      "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
   1422                      "if FPU not present\n",
   1423                      excret);
   1424        ftype = true;
   1425    }
   1426
   1427    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
   1428        /*
   1429         * EXC_RETURN.ES validation check (R_SMFL). We must do this before
   1430         * we pick which FAULTMASK to clear.
   1431         */
   1432        if (!env->v7m.secure &&
   1433            ((excret & R_V7M_EXCRET_ES_MASK) ||
   1434             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
   1435            sfault = true;
   1436            /* For all other purposes, treat ES as 0 (R_HXSR) */
   1437            excret &= ~R_V7M_EXCRET_ES_MASK;
   1438        }
   1439        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
   1440    }
   1441
   1442    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
   1443        /*
   1444         * Auto-clear FAULTMASK on return from other than NMI.
   1445         * If the security extension is implemented then this only
   1446         * happens if the raw execution priority is >= 0; the
   1447         * value of the ES bit in the exception return value indicates
   1448         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
   1449         */
   1450        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
   1451            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
   1452                env->v7m.faultmask[exc_secure] = 0;
   1453            }
   1454        } else {
   1455            env->v7m.faultmask[M_REG_NS] = 0;
   1456        }
   1457    }
   1458
   1459    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
   1460                                     exc_secure)) {
   1461    case -1:
   1462        /* attempt to exit an exception that isn't active */
   1463        ufault = true;
   1464        break;
   1465    case 0:
   1466        /* still an irq active now */
   1467        break;
   1468    case 1:
   1469        /*
   1470         * We returned to base exception level, no nesting.
   1471         * (In the pseudocode this is written using "NestedActivation != 1"
   1472         * where we have 'rettobase == false'.)
   1473         */
   1474        rettobase = true;
   1475        break;
   1476    default:
   1477        g_assert_not_reached();
   1478    }
   1479
   1480    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
   1481    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
   1482    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
   1483        (excret & R_V7M_EXCRET_S_MASK);
   1484
   1485    if (arm_feature(env, ARM_FEATURE_V8)) {
   1486        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
   1487            /*
   1488             * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
   1489             * we choose to take the UsageFault.
   1490             */
   1491            if ((excret & R_V7M_EXCRET_S_MASK) ||
   1492                (excret & R_V7M_EXCRET_ES_MASK) ||
   1493                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
   1494                ufault = true;
   1495            }
   1496        }
   1497        if (excret & R_V7M_EXCRET_RES0_MASK) {
   1498            ufault = true;
   1499        }
   1500    } else {
   1501        /* For v7M we only recognize certain combinations of the low bits */
   1502        switch (excret & 0xf) {
   1503        case 1: /* Return to Handler */
   1504            break;
   1505        case 13: /* Return to Thread using Process stack */
   1506        case 9: /* Return to Thread using Main stack */
   1507            /*
   1508             * We only need to check NONBASETHRDENA for v7M, because in
   1509             * v8M this bit does not exist (it is RES1).
   1510             */
   1511            if (!rettobase &&
   1512                !(env->v7m.ccr[env->v7m.secure] &
   1513                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
   1514                ufault = true;
   1515            }
   1516            break;
   1517        default:
   1518            ufault = true;
   1519        }
   1520    }
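
    /*
     * Together with the RES1 upper bits, the three recognized v7M cases
     * above correspond to the familiar full EXC_RETURN values: 0xfffffff1
     * (return to Handler), 0xfffffff9 (Thread, Main stack) and 0xfffffffd
     * (Thread, Process stack).
     */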
   1521
   1522    /*
   1523     * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
   1524     * Handler mode (and will be until we write the new XPSR.Interrupt
   1525     * field) this does not switch around the current stack pointer.
   1526     * We must do this before we do any kind of tailchaining, including
   1527     * for the derived exceptions on integrity check failures, or we will
   1528     * give the guest an incorrect EXCRET.SPSEL value on exception entry.
   1529     */
   1530    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
   1531
   1532    /*
   1533     * Clear scratch FP values left in caller saved registers; this
   1534     * must happen before any kind of tail chaining.
   1535     */
   1536    if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
   1537        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
   1538        if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
   1539            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
   1540            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   1541            qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
   1542                          "stackframe: error during lazy state deactivation\n");
   1543            v7m_exception_taken(cpu, excret, true, false);
   1544            return;
   1545        } else {
   1546            if (arm_feature(env, ARM_FEATURE_V8_1M)) {
   1547                /* v8.1M adds this NOCP check */
   1548                bool nsacr_pass = exc_secure ||
   1549                    extract32(env->v7m.nsacr, 10, 1);
   1550                bool cpacr_pass = v7m_cpacr_pass(env, exc_secure, true);
   1551                if (!nsacr_pass) {
   1552                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
   1553                    env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
   1554                    qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
   1555                        "stackframe: NSACR prevents clearing FPU registers\n");
   1556                    v7m_exception_taken(cpu, excret, true, false);
   1557                    return;
   1558                } else if (!cpacr_pass) {
   1559                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
   1560                                            exc_secure);
   1561                    env->v7m.cfsr[exc_secure] |= R_V7M_CFSR_NOCP_MASK;
   1562                    qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
   1563                        "stackframe: CPACR prevents clearing FPU registers\n");
   1564                    v7m_exception_taken(cpu, excret, true, false);
   1565                    return;
   1566                }
   1567            }
   1568            /* Clear s0..s15, FPSCR and VPR */
   1569            int i;
   1570
   1571            for (i = 0; i < 16; i += 2) {
   1572                *aa32_vfp_dreg(env, i / 2) = 0;
   1573            }
   1574            vfp_set_fpscr(env, 0);
   1575            if (cpu_isar_feature(aa32_mve, cpu)) {
   1576                env->v7m.vpr = 0;
   1577            }
   1578        }
   1579    }
   1580
   1581    if (sfault) {
   1582        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
   1583        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   1584        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
   1585                      "stackframe: failed EXC_RETURN.ES validity check\n");
   1586        v7m_exception_taken(cpu, excret, true, false);
   1587        return;
   1588    }
   1589
   1590    if (ufault) {
   1591        /*
   1592         * Bad exception return: instead of popping the exception
   1593         * stack, directly take a usage fault on the current stack.
   1594         */
   1595        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
   1596        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
   1597        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
   1598                      "stackframe: failed exception return integrity check\n");
   1599        v7m_exception_taken(cpu, excret, true, false);
   1600        return;
   1601    }
   1602
   1603    /*
   1604     * Tailchaining: if there is currently a pending exception that
   1605     * is high enough priority to preempt execution at the level we're
   1606     * about to return to, then just directly take that exception now,
   1607     * avoiding an unstack-and-then-stack. Note that now that we have
   1608     * deactivated the previous exception by calling armv7m_nvic_complete_irq(),
   1609     * our current execution priority is already the execution priority we are
   1610     * returning to -- none of the state we would unstack or set based on
   1611     * the EXCRET value affects it.
   1612     */
   1613    if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
   1614        qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
   1615        v7m_exception_taken(cpu, excret, true, false);
   1616        return;
   1617    }
   1618
   1619    switch_v7m_security_state(env, return_to_secure);
   1620
   1621    {
   1622        /*
   1623         * The stack pointer we should be reading the exception frame from
   1624         * depends on bits in the magic exception return type value (and
   1625         * for v8M isn't necessarily the stack pointer we will eventually
   1626         * end up resuming execution with). Get a pointer to the location
   1627         * in the CPU state struct where the SP we need is currently being
   1628         * stored; we will use and modify it in place.
   1629         * We use this limited C variable scope so we don't accidentally
   1630         * use 'frame_sp_p' after we do something that makes it invalid.
   1631         */
   1632        bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK;
   1633        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
   1634                                              return_to_secure,
   1635                                              !return_to_handler,
   1636                                              spsel);
   1637        uint32_t frameptr = *frame_sp_p;
   1638        bool pop_ok = true;
   1639        ARMMMUIdx mmu_idx;
   1640        bool return_to_priv = return_to_handler ||
   1641            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
   1642
   1643        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
   1644                                                        return_to_priv);
   1645
   1646        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
   1647            arm_feature(env, ARM_FEATURE_V8)) {
   1648            qemu_log_mask(LOG_GUEST_ERROR,
   1649                          "M profile exception return with non-8-aligned SP "
   1650                          "for destination state is UNPREDICTABLE\n");
   1651        }
   1652
   1653        /* Do we need to pop callee-saved registers? */
   1654        if (return_to_secure &&
   1655            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
   1656             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
   1657            uint32_t actual_sig;
   1658
   1659            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
   1660
   1661            if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
   1662                /* Take a SecureFault on the current stack */
   1663                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
   1664                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   1665                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
   1666                              "stackframe: failed exception return integrity "
   1667                              "signature check\n");
   1668                v7m_exception_taken(cpu, excret, true, false);
   1669                return;
   1670            }
   1671
   1672            pop_ok = pop_ok &&
   1673                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
   1674                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
   1675                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
   1676                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
   1677                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
   1678                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
   1679                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
   1680                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
   1681
   1682            frameptr += 0x28;
   1683        }
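
        /*
         * The callee-saved section just consumed is laid out as the
         * integrity signature at frameptr + 0x0 (see v7m_integrity_sig()),
         * a reserved word at + 0x4, then r4-r11 at + 0x8..0x24; hence the
         * 0x28 adjustment.
         */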
   1684
   1685        /* Pop registers */
   1686        pop_ok = pop_ok &&
   1687            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
   1688            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
   1689            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
   1690            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
   1691            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
   1692            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
   1693            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
   1694            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
   1695
   1696        if (!pop_ok) {
   1697            /*
   1698             * v7m_stack_read() pended a fault, so take it (as a tail
   1699             * chained exception on the same stack frame)
   1700             */
   1701            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
   1702            v7m_exception_taken(cpu, excret, true, false);
   1703            return;
   1704        }
   1705
   1706        /*
   1707         * Returning from an exception with a PC with bit 0 set is defined
   1708         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
   1709         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
   1710         * the lsbit, and there are several RTOSes out there which incorrectly
   1711         * assume the r15 in the stack frame should be a Thumb-style "lsbit
   1712         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
   1713         * complain about the badly behaved guest.
   1714         */
   1715        if (env->regs[15] & 1) {
   1716            env->regs[15] &= ~1U;
   1717            if (!arm_feature(env, ARM_FEATURE_V8)) {
   1718                qemu_log_mask(LOG_GUEST_ERROR,
   1719                              "M profile return from interrupt with misaligned "
   1720                              "PC is UNPREDICTABLE on v7M\n");
   1721            }
   1722        }
   1723
   1724        if (arm_feature(env, ARM_FEATURE_V8)) {
   1725            /*
   1726             * For v8M we have to check whether the xPSR exception field
   1727             * matches the EXCRET value for return to handler/thread
   1728             * before we commit to changing the SP and xPSR.
   1729             */
   1730            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
   1731            if (return_to_handler != will_be_handler) {
   1732                /*
   1733                 * Take an INVPC UsageFault on the current stack.
   1734                 * By this point we will have switched to the security state
   1735                 * for the background state, so this UsageFault will target
   1736                 * that state.
   1737                 */
   1738                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
   1739                                        env->v7m.secure);
   1740                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
   1741                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
   1742                              "stackframe: failed exception return integrity "
   1743                              "check\n");
   1744                v7m_exception_taken(cpu, excret, true, false);
   1745                return;
   1746            }
   1747        }
   1748
   1749        if (!ftype) {
   1750            /* FP present and we need to handle it */
   1751            if (!return_to_secure &&
   1752                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
   1753                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   1754                env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
   1755                qemu_log_mask(CPU_LOG_INT,
   1756                              "...taking SecureFault on existing stackframe: "
   1757                              "Secure LSPACT set but exception return is "
   1758                              "not to secure state\n");
   1759                v7m_exception_taken(cpu, excret, true, false);
   1760                return;
   1761            }
   1762
   1763            restore_s16_s31 = return_to_secure &&
   1764                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
   1765
   1766            if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
   1767                /* State in FPU is still valid, just clear LSPACT */
   1768                env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
   1769            } else {
   1770                int i;
   1771                uint32_t fpscr;
   1772                bool cpacr_pass, nsacr_pass;
   1773
   1774                cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
   1775                                            return_to_priv);
   1776                nsacr_pass = return_to_secure ||
   1777                    extract32(env->v7m.nsacr, 10, 1);
   1778
   1779                if (!cpacr_pass) {
   1780                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
   1781                                            return_to_secure);
   1782                    env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
   1783                    qemu_log_mask(CPU_LOG_INT,
   1784                                  "...taking UsageFault on existing "
   1785                                  "stackframe: CPACR.CP10 prevents unstacking "
   1786                                  "FP regs\n");
   1787                    v7m_exception_taken(cpu, excret, true, false);
   1788                    return;
   1789                } else if (!nsacr_pass) {
   1790                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
   1791                    env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
   1792                    qemu_log_mask(CPU_LOG_INT,
   1793                                  "...taking Secure UsageFault on existing "
   1794                                  "stackframe: NSACR.CP10 prevents unstacking "
   1795                                  "FP regs\n");
   1796                    v7m_exception_taken(cpu, excret, true, false);
   1797                    return;
   1798                }
   1799
   1800                for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
   1801                    uint32_t slo, shi;
   1802                    uint64_t dn;
   1803                    uint32_t faddr = frameptr + 0x20 + 4 * i;
   1804
   1805                    if (i >= 16) {
   1806                        faddr += 8; /* Skip the slot for the FPSCR and VPR */
   1807                    }
   1808
   1809                    pop_ok = pop_ok &&
   1810                        v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
   1811                        v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
   1812
   1813                    if (!pop_ok) {
   1814                        break;
   1815                    }
   1816
   1817                    dn = (uint64_t)shi << 32 | slo;
   1818                    *aa32_vfp_dreg(env, i / 2) = dn;
   1819                }
   1820                pop_ok = pop_ok &&
   1821                    v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
   1822                if (pop_ok) {
   1823                    vfp_set_fpscr(env, fpscr);
   1824                }
   1825                if (cpu_isar_feature(aa32_mve, cpu)) {
   1826                    pop_ok = pop_ok &&
   1827                        v7m_stack_read(cpu, &env->v7m.vpr,
   1828                                       frameptr + 0x64, mmu_idx);
   1829                }
   1830                if (!pop_ok) {
   1831                    /*
   1832                     * These regs are 0 if the security extension is present;
   1833                     * otherwise they are merely UNKNOWN. We always zero them.
   1834                     */
   1835                    for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
   1836                        *aa32_vfp_dreg(env, i / 2) = 0;
   1837                    }
   1838                    vfp_set_fpscr(env, 0);
   1839                    if (cpu_isar_feature(aa32_mve, cpu)) {
   1840                        env->v7m.vpr = 0;
   1841                    }
   1842                }
   1843            }
   1844        }
   1845        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
   1846                                               V7M_CONTROL, FPCA, !ftype);
   1847
   1848        /* Commit to consuming the stack frame */
   1849        frameptr += 0x20;
   1850        if (!ftype) {
   1851            frameptr += 0x48;
   1852            if (restore_s16_s31) {
   1853                frameptr += 0x40;
   1854            }
   1855        }
   1856        /*
   1857         * Undo stack alignment: the SPREALIGN bit indicates that the original
   1858         * pre-exception SP was not 8-aligned and we added a padding word to
   1859         * align it, so we undo this by ORing in the bit that increases it
   1860         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
   1861         * would work too, but a logical OR is how the pseudocode specifies it.)
   1862         */
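        /*
         * Worked example: if the pre-exception SP was 0x20000fd4 (bit 2
         * set, so not 8-aligned), entry pushed a padding word and set
         * SPREALIGN; after consuming the frame we arrive at the 8-aligned
         * 0x20000fd0, and ORing in bit 2 restores the original 0x20000fd4.
         */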
   1863        if (xpsr & XPSR_SPREALIGN) {
   1864            frameptr |= 4;
   1865        }
   1866        *frame_sp_p = frameptr;
   1867    }
   1868
   1869    xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
   1870    if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
   1871        xpsr_mask &= ~XPSR_GE;
   1872    }
   1873    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
   1874    xpsr_write(env, xpsr, xpsr_mask);
   1875
   1876    if (env->v7m.secure) {
   1877        bool sfpa = xpsr & XPSR_SFPA;
   1878
   1879        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
   1880                                               V7M_CONTROL, SFPA, sfpa);
   1881    }
   1882
   1883    /*
   1884     * The restored xPSR exception field will be zero if we're
   1885     * resuming in Thread mode. If that doesn't match what the
   1886     * exception return excret specified then this is a UsageFault.
   1887     * v7M requires we make this check here; v8M did it earlier.
   1888     */
   1889    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
   1890        /*
   1891         * Take an INVPC UsageFault by pushing the stack again;
   1892         * we know we're v7M so this is never a Secure UsageFault.
   1893         */
   1894        bool ignore_stackfaults;
   1895
   1896        assert(!arm_feature(env, ARM_FEATURE_V8));
   1897        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
   1898        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
   1899        ignore_stackfaults = v7m_push_stack(cpu);
   1900        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
   1901                      "failed exception return integrity check\n");
   1902        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
   1903        return;
   1904    }
   1905
   1906    /* Otherwise, we have a successful exception exit. */
   1907    arm_clear_exclusive(env);
   1908    arm_rebuild_hflags(env);
   1909    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
   1910}
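
/*
 * A minimal illustrative sketch, not part of the original file: decode the
 * EXC_RETURN payload handled by do_v7m_exception_exit() above into its named
 * fields via the R_V7M_EXCRET_* definitions from cpu.h. The helper name is
 * hypothetical and G_GNUC_UNUSED keeps the unused illustration warning-free.
 */
static void G_GNUC_UNUSED v7m_log_excret_fields(uint32_t excret)
{
    qemu_log_mask(CPU_LOG_INT,
                  "EXC_RETURN 0x%08" PRIx32 ": ES=%d SPSEL=%d MODE=%d "
                  "FTYPE=%d DCRS=%d S=%d\n", excret,
                  (int)FIELD_EX32(excret, V7M_EXCRET, ES),
                  (int)FIELD_EX32(excret, V7M_EXCRET, SPSEL),
                  (int)FIELD_EX32(excret, V7M_EXCRET, MODE),
                  (int)FIELD_EX32(excret, V7M_EXCRET, FTYPE),
                  (int)FIELD_EX32(excret, V7M_EXCRET, DCRS),
                  (int)FIELD_EX32(excret, V7M_EXCRET, S));
}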
   1911
   1912static bool do_v7m_function_return(ARMCPU *cpu)
   1913{
   1914    /*
   1915     * v8M security extensions magic function return.
   1916     * We may either:
   1917     *  (1) throw an exception (longjump)
   1918     *  (2) return true if we successfully handled the function return
   1919     *  (3) return false if we failed a consistency check and have
   1920     *      pended a UsageFault that needs to be taken now
   1921     *
   1922     * At this point the magic return value is split between env->regs[15]
   1923     * and env->thumb. We don't bother to reconstitute it because we don't
   1924     * need it (all values are handled the same way).
   1925     */
   1926    CPUARMState *env = &cpu->env;
   1927    uint32_t newpc, newpsr, newpsr_exc;
   1928
   1929    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
   1930
   1931    {
   1932        bool threadmode, spsel;
   1933        MemOpIdx oi;
   1934        ARMMMUIdx mmu_idx;
   1935        uint32_t *frame_sp_p;
   1936        uint32_t frameptr;
   1937
   1938        /* Pull the return address and IPSR from the Secure stack */
   1939        threadmode = !arm_v7m_is_handler_mode(env);
   1940        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
   1941
   1942        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
   1943        frameptr = *frame_sp_p;
   1944
   1945        /*
   1946         * These loads may throw an exception (for MPU faults). We want to
   1947         * do them as secure, so work out what MMU index that is.
   1948         */
   1949        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
   1950        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
   1951        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
   1952        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
   1953
   1954        /* Consistency checks on new IPSR */
   1955        newpsr_exc = newpsr & XPSR_EXCP;
   1956        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
   1957              (env->v7m.exception == 1 && newpsr_exc != 0))) {
   1958            /* Pend the fault and tell our caller to take it */
   1959            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
   1960            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
   1961                                    env->v7m.secure);
   1962            qemu_log_mask(CPU_LOG_INT,
   1963                          "...taking INVPC UsageFault: "
   1964                          "IPSR consistency check failed\n");
   1965            return false;
   1966        }
   1967
   1968        *frame_sp_p = frameptr + 8;
   1969    }
   1970
   1971    /* This invalidates frame_sp_p */
   1972    switch_v7m_security_state(env, true);
   1973    env->v7m.exception = newpsr_exc;
   1974    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
   1975    if (newpsr & XPSR_SFPA) {
   1976        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
   1977    }
   1978    xpsr_write(env, 0, XPSR_IT);
   1979    env->thumb = newpc & 1;
   1980    env->regs[15] = newpc & ~1;
   1981    arm_rebuild_hflags(env);
   1982
   1983    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
   1984    return true;
   1985}
   1986
   1987static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
   1988                               uint32_t addr, uint16_t *insn)
   1989{
   1990    /*
   1991     * Load a 16-bit portion of a v7M instruction, returning true on success,
   1992     * or false on failure (in which case we will have pended the appropriate
   1993     * exception).
   1994     * We need to do the instruction fetch's MPU and SAU checks
   1995     * like this because there is no MMU index that would allow
   1996     * doing the load with a single function call. Instead we must
   1997     * first check that the security attributes permit the load
   1998     * and that they don't mismatch on the two halves of the instruction,
   1999     * and then we do the load as a secure load (ie using the security
   2000     * attributes of the address, not the CPU, as architecturally required).
   2001     */
   2002    CPUState *cs = CPU(cpu);
   2003    CPUARMState *env = &cpu->env;
   2004    V8M_SAttributes sattrs = {};
   2005    MemTxAttrs attrs = {};
   2006    ARMMMUFaultInfo fi = {};
   2007    ARMCacheAttrs cacheattrs = {};
   2008    MemTxResult txres;
   2009    target_ulong page_size;
   2010    hwaddr physaddr;
   2011    int prot;
   2012
   2013    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
   2014    if (!sattrs.nsc || sattrs.ns) {
   2015        /*
   2016         * This must be the second half of the insn, and it straddles a
   2017         * region boundary with the second half not being S&NSC.
   2018         */
   2019        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
   2020        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   2021        qemu_log_mask(CPU_LOG_INT,
   2022                      "...really SecureFault with SFSR.INVEP\n");
   2023        return false;
   2024    }
   2025    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &physaddr,
   2026                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
   2027        /* the MPU lookup failed */
   2028        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
   2029        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
   2030        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
   2031        return false;
   2032    }
   2033    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
   2034                                 attrs, &txres);
   2035    if (txres != MEMTX_OK) {
   2036        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
   2037        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
   2038        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
   2039        return false;
   2040    }
   2041    return true;
   2042}
   2043
   2044static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
   2045                                   uint32_t addr, uint32_t *spdata)
   2046{
   2047    /*
   2048     * Read a word of data from the stack for the SG instruction,
   2049     * writing the value into *spdata. If the load succeeds, return
   2050     * true; otherwise pend an appropriate exception and return false.
   2051     * (We can't use data load helpers here that throw an exception
   2052     * because of the context we're called in, which is halfway through
   2053     * arm_v7m_cpu_do_interrupt().)
   2054     */
   2055    CPUState *cs = CPU(cpu);
   2056    CPUARMState *env = &cpu->env;
   2057    MemTxAttrs attrs = {};
   2058    MemTxResult txres;
   2059    target_ulong page_size;
   2060    hwaddr physaddr;
   2061    int prot;
   2062    ARMMMUFaultInfo fi = {};
   2063    ARMCacheAttrs cacheattrs = {};
   2064    uint32_t value;
   2065
   2066    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
   2067                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
   2068        /* MPU/SAU lookup failed */
   2069        if (fi.type == ARMFault_QEMU_SFault) {
   2070            qemu_log_mask(CPU_LOG_INT,
   2071                          "...SecureFault during stack word read\n");
   2072            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
   2073            env->v7m.sfar = addr;
   2074            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   2075        } else {
   2076            qemu_log_mask(CPU_LOG_INT,
   2077                          "...MemManageFault during stack word read\n");
   2078            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_DACCVIOL_MASK |
   2079                R_V7M_CFSR_MMARVALID_MASK;
   2080            env->v7m.mmfar[M_REG_S] = addr;
   2081            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false);
   2082        }
   2083        return false;
   2084    }
   2085    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
   2086                              attrs, &txres);
   2087    if (txres != MEMTX_OK) {
   2088        /* BusFault trying to read the data */
   2089        qemu_log_mask(CPU_LOG_INT,
   2090                      "...BusFault during stack word read\n");
   2091        env->v7m.cfsr[M_REG_NS] |=
   2092            (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
   2093        env->v7m.bfar = addr;
   2094        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
   2095        return false;
   2096    }
   2097
   2098    *spdata = value;
   2099    return true;
   2100}
   2101
   2102static bool v7m_handle_execute_nsc(ARMCPU *cpu)
   2103{
   2104    /*
   2105     * Check whether this attempt to execute code in a Secure & NS-Callable
   2106     * memory region is for an SG instruction; if so, then emulate the
   2107     * effect of the SG instruction and return true. Otherwise pend
   2108     * the correct kind of exception and return false.
   2109     */
   2110    CPUARMState *env = &cpu->env;
   2111    ARMMMUIdx mmu_idx;
   2112    uint16_t insn;
   2113
   2114    /*
   2115     * We should never get here unless get_phys_addr_pmsav8() caused
   2116     * an exception for NS executing in S&NSC memory.
   2117     */
   2118    assert(!env->v7m.secure);
   2119    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
   2120
   2121    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
   2122    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
   2123
   2124    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
   2125        return false;
   2126    }
   2127
   2128    if (!env->thumb) {
   2129        goto gen_invep;
   2130    }
   2131
   2132    if (insn != 0xe97f) {
   2133        /*
   2134         * Not an SG instruction first half (we choose the IMPDEF
   2135         * early-SG-check option).
   2136         */
   2137        goto gen_invep;
   2138    }
   2139
   2140    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
   2141        return false;
   2142    }
   2143
   2144    if (insn != 0xe97f) {
   2145        /*
   2146         * Not an SG instruction second half (yes, both halves of the SG
   2147         * insn have the same hex value)
   2148         */
   2149        goto gen_invep;
   2150    }
   2151
   2152    /*
   2153     * OK, we have confirmed that we really have an SG instruction.
   2154     * We know we're NS in S memory so don't need to repeat those checks.
   2155     */
   2156    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
   2157                  ", executing it\n", env->regs[15]);
   2158
   2159    if (cpu_isar_feature(aa32_m_sec_state, cpu) &&
   2160        !arm_v7m_is_handler_mode(env)) {
   2161        /*
   2162         * v8.1M exception stack frame integrity check. Note that we
   2163         * must perform the memory access even if CCR_S.TRD is zero
   2164         * and we aren't going to check what the data loaded is.
   2165         */
   2166        uint32_t spdata, sp;
   2167
   2168        /*
   2169         * We know we are currently NS, so the S stack pointers must be
   2170         * in other_ss_{psp,msp}, not in regs[13]/other_sp.
   2171         */
   2172        sp = v7m_using_psp(env) ? env->v7m.other_ss_psp : env->v7m.other_ss_msp;
   2173        if (!v7m_read_sg_stack_word(cpu, mmu_idx, sp, &spdata)) {
   2174            /* Stack access failed and an exception has been pended */
   2175            return false;
   2176        }
   2177
   2178        if (env->v7m.ccr[M_REG_S] & R_V7M_CCR_TRD_MASK) {
   2179            if (((spdata & ~1) == 0xfefa125a) ||
   2180                !(env->v7m.control[M_REG_S] & 1)) {
   2181                goto gen_invep;
   2182            }
   2183        }
   2184    }
   2185
   2186    env->regs[14] &= ~1;
   2187    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
   2188    switch_v7m_security_state(env, true);
   2189    xpsr_write(env, 0, XPSR_IT);
   2190    env->regs[15] += 4;
   2191    arm_rebuild_hflags(env);
   2192    return true;
   2193
   2194gen_invep:
   2195    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
   2196    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   2197    qemu_log_mask(CPU_LOG_INT,
   2198                  "...really SecureFault with SFSR.INVEP\n");
   2199    return false;
   2200}
   2201
   2202void arm_v7m_cpu_do_interrupt(CPUState *cs)
   2203{
   2204    ARMCPU *cpu = ARM_CPU(cs);
   2205    CPUARMState *env = &cpu->env;
   2206    uint32_t lr;
   2207    bool ignore_stackfaults;
   2208
   2209    arm_log_exception(cs->exception_index);
   2210
   2211    /*
   2212     * For exceptions we just mark as pending on the NVIC, and let that
   2213     * handle it.
   2214     */
   2215    switch (cs->exception_index) {
   2216    case EXCP_UDEF:
   2217        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
   2218        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
   2219        break;
   2220    case EXCP_NOCP:
   2221    {
   2222        /*
   2223         * NOCP might be directed to something other than the current
   2224         * security state if this fault is because of NSACR; we indicate
   2225         * the target security state using exception.target_el.
   2226         */
   2227        int target_secstate;
   2228
   2229        if (env->exception.target_el == 3) {
   2230            target_secstate = M_REG_S;
   2231        } else {
   2232            target_secstate = env->v7m.secure;
   2233        }
   2234        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
   2235        env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
   2236        break;
   2237    }
   2238    case EXCP_INVSTATE:
   2239        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
   2240        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
   2241        break;
   2242    case EXCP_STKOF:
   2243        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
   2244        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
   2245        break;
   2246    case EXCP_LSERR:
   2247        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   2248        env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
   2249        break;
   2250    case EXCP_UNALIGNED:
   2251        /* Unaligned faults reported by M-profile aware code */
   2252        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
   2253        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
   2254        break;
   2255    case EXCP_DIVBYZERO:
   2256        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
   2257        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_DIVBYZERO_MASK;
   2258        break;
   2259    case EXCP_SWI:
   2260        /* The PC already points to the next instruction.  */
   2261        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
   2262        break;
   2263    case EXCP_PREFETCH_ABORT:
   2264    case EXCP_DATA_ABORT:
   2265        /*
   2266         * Note that for M profile we don't have a guest facing FSR, but
   2267         * the env->exception.fsr will be populated by the code that
   2268         * raises the fault, in the A profile short-descriptor format.
   2269         */
   2270        switch (env->exception.fsr & 0xf) {
   2271        case M_FAKE_FSR_NSC_EXEC:
   2272            /*
   2273             * Exception generated when we try to execute code at an address
   2274             * which is marked as Secure & Non-Secure Callable and the CPU
   2275             * is in the Non-Secure state. The only instruction which can
   2276             * be executed like this is SG (and that only if both halves of
   2277             * the SG instruction have the same security attributes.)
   2278             * Everything else must generate an INVEP SecureFault, so we
   2279             * emulate the SG instruction here.
   2280             */
   2281            if (v7m_handle_execute_nsc(cpu)) {
   2282                return;
   2283            }
   2284            break;
   2285        case M_FAKE_FSR_SFAULT:
   2286            /*
   2287             * Various flavours of SecureFault for attempts to execute or
   2288             * access data in the wrong security state.
   2289             */
   2290            switch (cs->exception_index) {
   2291            case EXCP_PREFETCH_ABORT:
   2292                if (env->v7m.secure) {
   2293                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
   2294                    qemu_log_mask(CPU_LOG_INT,
   2295                                  "...really SecureFault with SFSR.INVTRAN\n");
   2296                } else {
   2297                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
   2298                    qemu_log_mask(CPU_LOG_INT,
   2299                                  "...really SecureFault with SFSR.INVEP\n");
   2300                }
   2301                break;
   2302            case EXCP_DATA_ABORT:
   2303                /* This must be an NS access to S memory */
   2304                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
   2305                qemu_log_mask(CPU_LOG_INT,
   2306                              "...really SecureFault with SFSR.AUVIOL\n");
   2307                break;
   2308            }
   2309            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   2310            break;
   2311        case 0x8: /* External Abort */
   2312            switch (cs->exception_index) {
   2313            case EXCP_PREFETCH_ABORT:
   2314                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
   2315                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
   2316                break;
   2317            case EXCP_DATA_ABORT:
   2318                env->v7m.cfsr[M_REG_NS] |=
   2319                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
   2320                env->v7m.bfar = env->exception.vaddress;
   2321                qemu_log_mask(CPU_LOG_INT,
   2322                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
   2323                              env->v7m.bfar);
   2324                break;
   2325            }
   2326            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
   2327            break;
   2328        case 0x1: /* Alignment fault reported by generic code */
   2329            qemu_log_mask(CPU_LOG_INT,
   2330                          "...really UsageFault with UFSR.UNALIGNED\n");
   2331            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
   2332            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
   2333                                    env->v7m.secure);
   2334            break;
   2335        default:
   2336            /*
   2337             * All other FSR values are either MPU faults or "can't happen
   2338             * for M profile" cases.
   2339             */
   2340            switch (cs->exception_index) {
   2341            case EXCP_PREFETCH_ABORT:
   2342                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
   2343                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
   2344                break;
   2345            case EXCP_DATA_ABORT:
   2346                env->v7m.cfsr[env->v7m.secure] |=
   2347                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
   2348                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
   2349                qemu_log_mask(CPU_LOG_INT,
   2350                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
   2351                              env->v7m.mmfar[env->v7m.secure]);
   2352                break;
   2353            }
   2354            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
   2355                                    env->v7m.secure);
   2356            break;
   2357        }
   2358        break;
   2359    case EXCP_SEMIHOST:
   2360        qemu_log_mask(CPU_LOG_INT,
   2361                      "...handling as semihosting call 0x%x\n",
   2362                      env->regs[0]);
   2363#ifdef CONFIG_TCG
   2364        env->regs[0] = do_common_semihosting(cs);
   2365#else
   2366        g_assert_not_reached();
   2367#endif
   2368        env->regs[15] += env->thumb ? 2 : 4;
   2369        return;
   2370    case EXCP_BKPT:
   2371        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
   2372        break;
   2373    case EXCP_IRQ:
   2374        break;
   2375    case EXCP_EXCEPTION_EXIT:
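        /*
         * regs[15] here holds a magic address from one of two reserved
         * ranges (both defined in internals.h): values at or above
         * EXC_RETURN_MIN_MAGIC are exception returns, while the range
         * starting at FNC_RETURN_MIN_MAGIC just below it is the v8M
         * secure function return.
         */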
   2376        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
   2377            /* Must be v8M security extension function return */
   2378            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
   2379            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
   2380            if (do_v7m_function_return(cpu)) {
   2381                return;
   2382            }
   2383        } else {
   2384            do_v7m_exception_exit(cpu);
   2385            return;
   2386        }
   2387        break;
   2388    case EXCP_LAZYFP:
   2389        /*
   2390         * We already pended the specific exception in the NVIC in the
   2391         * v7m_preserve_fp_state() helper function.
   2392         */
   2393        break;
   2394    default:
   2395        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
   2396        return; /* Never happens.  Keep compiler happy.  */
   2397    }
   2398
   2399    if (arm_feature(env, ARM_FEATURE_V8)) {
   2400        lr = R_V7M_EXCRET_RES1_MASK |
   2401            R_V7M_EXCRET_DCRS_MASK;
   2402        /*
   2403         * The S bit indicates whether we should return to Secure
   2404         * or NonSecure (ie our current state).
   2405         * The ES bit indicates whether we're taking this exception
   2406         * to Secure or NonSecure (ie our target state). We set it
   2407         * later, in v7m_exception_taken().
   2408         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
   2409         * This corresponds to the ARM ARM pseudocode for v8M setting
   2410         * some LR bits in PushStack() and some in ExceptionTaken();
   2411         * the distinction matters for the tailchain cases where we
   2412         * can take an exception without pushing the stack.
   2413         */
   2414        if (env->v7m.secure) {
   2415            lr |= R_V7M_EXCRET_S_MASK;
   2416        }
   2417    } else {
   2418        lr = R_V7M_EXCRET_RES1_MASK |
   2419            R_V7M_EXCRET_S_MASK |
   2420            R_V7M_EXCRET_DCRS_MASK |
   2421            R_V7M_EXCRET_ES_MASK;
   2422        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
   2423            lr |= R_V7M_EXCRET_SPSEL_MASK;
   2424        }
   2425    }
   2426    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
   2427        lr |= R_V7M_EXCRET_FTYPE_MASK;
   2428    }
   2429    if (!arm_v7m_is_handler_mode(env)) {
   2430        lr |= R_V7M_EXCRET_MODE_MASK;
   2431    }
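
    /*
     * Worked example: on v7M in Thread mode using the Process stack with
     * no FP context active, the bits accumulated above combine to the
     * classic EXC_RETURN value 0xfffffffd (RES1 | S | DCRS | ES | FTYPE |
     * MODE | SPSEL).
     */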
   2432
   2433    ignore_stackfaults = v7m_push_stack(cpu);
   2434    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
   2435}
   2436
   2437uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
   2438{
   2439    unsigned el = arm_current_el(env);
   2440
   2441    /* First handle registers which unprivileged can read */
   2442    switch (reg) {
   2443    case 0 ... 7: /* xPSR sub-fields */
   2444        return v7m_mrs_xpsr(env, reg, el);
   2445    case 20: /* CONTROL */
   2446        return v7m_mrs_control(env, env->v7m.secure);
   2447    case 0x94: /* CONTROL_NS */
   2448        /*
   2449         * We have to handle this here because unprivileged Secure code
   2450         * can read the NS CONTROL register.
   2451         */
   2452        if (!env->v7m.secure) {
   2453            return 0;
   2454        }
   2455        return env->v7m.control[M_REG_NS] |
   2456            (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
   2457    }
   2458
   2459    if (el == 0) {
   2460        return 0; /* unprivileged reads others as zero */
   2461    }
   2462
   2463    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
   2464        switch (reg) {
   2465        case 0x88: /* MSP_NS */
   2466            if (!env->v7m.secure) {
   2467                return 0;
   2468            }
   2469            return env->v7m.other_ss_msp;
   2470        case 0x89: /* PSP_NS */
   2471            if (!env->v7m.secure) {
   2472                return 0;
   2473            }
   2474            return env->v7m.other_ss_psp;
   2475        case 0x8a: /* MSPLIM_NS */
   2476            if (!env->v7m.secure) {
   2477                return 0;
   2478            }
   2479            return env->v7m.msplim[M_REG_NS];
   2480        case 0x8b: /* PSPLIM_NS */
   2481            if (!env->v7m.secure) {
   2482                return 0;
   2483            }
   2484            return env->v7m.psplim[M_REG_NS];
   2485        case 0x90: /* PRIMASK_NS */
   2486            if (!env->v7m.secure) {
   2487                return 0;
   2488            }
   2489            return env->v7m.primask[M_REG_NS];
   2490        case 0x91: /* BASEPRI_NS */
   2491            if (!env->v7m.secure) {
   2492                return 0;
   2493            }
   2494            return env->v7m.basepri[M_REG_NS];
   2495        case 0x93: /* FAULTMASK_NS */
   2496            if (!env->v7m.secure) {
   2497                return 0;
   2498            }
   2499            return env->v7m.faultmask[M_REG_NS];
   2500        case 0x98: /* SP_NS */
   2501        {
   2502            /*
   2503             * This gives the non-secure SP selected based on whether we're
   2504             * currently in handler mode or not, using the NS CONTROL.SPSEL.
   2505             */
   2506            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
   2507
   2508            if (!env->v7m.secure) {
   2509                return 0;
   2510            }
   2511            if (!arm_v7m_is_handler_mode(env) && spsel) {
   2512                return env->v7m.other_ss_psp;
   2513            } else {
   2514                return env->v7m.other_ss_msp;
   2515            }
   2516        }
   2517        default:
   2518            break;
   2519        }
   2520    }
   2521
   2522    switch (reg) {
   2523    case 8: /* MSP */
   2524        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
   2525    case 9: /* PSP */
   2526        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
   2527    case 10: /* MSPLIM */
   2528        if (!arm_feature(env, ARM_FEATURE_V8)) {
   2529            goto bad_reg;
   2530        }
   2531        return env->v7m.msplim[env->v7m.secure];
   2532    case 11: /* PSPLIM */
   2533        if (!arm_feature(env, ARM_FEATURE_V8)) {
   2534            goto bad_reg;
   2535        }
   2536        return env->v7m.psplim[env->v7m.secure];
   2537    case 16: /* PRIMASK */
   2538        return env->v7m.primask[env->v7m.secure];
   2539    case 17: /* BASEPRI */
   2540    case 18: /* BASEPRI_MAX */
   2541        return env->v7m.basepri[env->v7m.secure];
   2542    case 19: /* FAULTMASK */
   2543        return env->v7m.faultmask[env->v7m.secure];
   2544    default:
   2545    bad_reg:
   2546        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
   2547                                       " register %d\n", reg);
   2548        return 0;
   2549    }
   2550}
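
/*
 * For reference, the SYSm encodings handled by the MRS/MSR helpers in this
 * file: 0-7 are the xPSR sub-fields, 8/9 MSP/PSP, 10/11 MSPLIM/PSPLIM (v8M
 * only), 16 PRIMASK, 17/18 BASEPRI/BASEPRI_MAX, 19 FAULTMASK and 20 CONTROL.
 * The 0x88..0x98 values are the Non-secure banked aliases (MSP_NS ... SP_NS),
 * accessible only from Secure state.
 */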
   2551
   2552void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
   2553{
   2554    /*
   2555     * We're passed bits [11..0] of the instruction; extract
   2556     * SYSm and the mask bits.
   2557     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
   2558     * we choose to treat them as if the mask bits were valid.
   2559     * NB that the pseudocode 'mask' variable is bits [11..10],
   2560     * whereas ours is [11..8].
   2561     */
   2562    uint32_t mask = extract32(maskreg, 8, 4);
   2563    uint32_t reg = extract32(maskreg, 0, 8);
   2564    int cur_el = arm_current_el(env);
   2565
   2566    if (cur_el == 0 && reg > 7 && reg != 20) {
   2567        /*
   2568         * only xPSR sub-fields and CONTROL.SFPA may be written by
   2569         * unprivileged code
   2570         */
   2571        return;
   2572    }
   2573
   2574    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
   2575        switch (reg) {
   2576        case 0x88: /* MSP_NS */
   2577            if (!env->v7m.secure) {
   2578                return;
   2579            }
   2580            env->v7m.other_ss_msp = val & ~3;
   2581            return;
   2582        case 0x89: /* PSP_NS */
   2583            if (!env->v7m.secure) {
   2584                return;
   2585            }
   2586            env->v7m.other_ss_psp = val & ~3;
   2587            return;
   2588        case 0x8a: /* MSPLIM_NS */
   2589            if (!env->v7m.secure) {
   2590                return;
   2591            }
   2592            env->v7m.msplim[M_REG_NS] = val & ~7;
   2593            return;
   2594        case 0x8b: /* PSPLIM_NS */
   2595            if (!env->v7m.secure) {
   2596                return;
   2597            }
   2598            env->v7m.psplim[M_REG_NS] = val & ~7;
   2599            return;
   2600        case 0x90: /* PRIMASK_NS */
   2601            if (!env->v7m.secure) {
   2602                return;
   2603            }
   2604            env->v7m.primask[M_REG_NS] = val & 1;
   2605            return;
   2606        case 0x91: /* BASEPRI_NS */
   2607            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
   2608                return;
   2609            }
   2610            env->v7m.basepri[M_REG_NS] = val & 0xff;
   2611            return;
   2612        case 0x93: /* FAULTMASK_NS */
   2613            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
   2614                return;
   2615            }
   2616            env->v7m.faultmask[M_REG_NS] = val & 1;
   2617            return;
   2618        case 0x94: /* CONTROL_NS */
   2619            if (!env->v7m.secure) {
   2620                return;
   2621            }
   2622            write_v7m_control_spsel_for_secstate(env,
   2623                                                 val & R_V7M_CONTROL_SPSEL_MASK,
   2624                                                 M_REG_NS);
   2625            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
   2626                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
   2627                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
   2628            }
   2629            /*
   2630             * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
   2631             * RES0 if the FPU is not present, and is stored in the S bank
   2632             */
   2633            if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) &&
   2634                extract32(env->v7m.nsacr, 10, 1)) {
   2635                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
   2636                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
   2637            }
   2638            return;
   2639        case 0x98: /* SP_NS */
   2640        {
   2641            /*
   2642             * This gives the non-secure SP selected based on whether we're
   2643             * currently in handler mode or not, using the NS CONTROL.SPSEL.
   2644             */
   2645            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
   2646            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
   2647            uint32_t limit;
   2648
   2649            if (!env->v7m.secure) {
   2650                return;
   2651            }
   2652
    2653            limit = is_psp ? env->v7m.psplim[M_REG_NS]
                                   : env->v7m.msplim[M_REG_NS];
   2654
   2655            val &= ~0x3;
   2656
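                    /*
                     * Example (illustrative): with MSPLIM_NS = 0x20001000, a
                     * Secure-state "MSR SP_NS, r0" with r0 = 0x20000ff8 while
                     * the NS MSP is selected fails the limit check below and
                     * raises the v8M stack-overflow (STKOF) UsageFault.
                     */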
   2657            if (val < limit) {
   2658                raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
   2659            }
   2660
   2661            if (is_psp) {
   2662                env->v7m.other_ss_psp = val;
   2663            } else {
   2664                env->v7m.other_ss_msp = val;
   2665            }
   2666            return;
   2667        }
   2668        default:
   2669            break;
   2670        }
   2671    }
   2672
   2673    switch (reg) {
   2674    case 0 ... 7: /* xPSR sub-fields */
   2675        v7m_msr_xpsr(env, mask, reg, val);
   2676        break;
   2677    case 8: /* MSP */
   2678        if (v7m_using_psp(env)) {
   2679            env->v7m.other_sp = val & ~3;
   2680        } else {
   2681            env->regs[13] = val & ~3;
   2682        }
   2683        break;
   2684    case 9: /* PSP */
   2685        if (v7m_using_psp(env)) {
   2686            env->regs[13] = val & ~3;
   2687        } else {
   2688            env->v7m.other_sp = val & ~3;
   2689        }
   2690        break;
   2691    case 10: /* MSPLIM */
   2692        if (!arm_feature(env, ARM_FEATURE_V8)) {
   2693            goto bad_reg;
   2694        }
   2695        env->v7m.msplim[env->v7m.secure] = val & ~7;
   2696        break;
   2697    case 11: /* PSPLIM */
   2698        if (!arm_feature(env, ARM_FEATURE_V8)) {
   2699            goto bad_reg;
   2700        }
   2701        env->v7m.psplim[env->v7m.secure] = val & ~7;
   2702        break;
   2703    case 16: /* PRIMASK */
   2704        env->v7m.primask[env->v7m.secure] = val & 1;
   2705        break;
   2706    case 17: /* BASEPRI */
   2707        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
   2708            goto bad_reg;
   2709        }
   2710        env->v7m.basepri[env->v7m.secure] = val & 0xff;
   2711        break;
   2712    case 18: /* BASEPRI_MAX */
   2713        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
   2714            goto bad_reg;
   2715        }
   2716        val &= 0xff;
   2717        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
   2718                         || env->v7m.basepri[env->v7m.secure] == 0)) {
   2719            env->v7m.basepri[env->v7m.secure] = val;
   2720        }
   2721        break;
   2722    case 19: /* FAULTMASK */
   2723        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
   2724            goto bad_reg;
   2725        }
   2726        env->v7m.faultmask[env->v7m.secure] = val & 1;
   2727        break;
   2728    case 20: /* CONTROL */
   2729        /*
   2730         * Writing to the SPSEL bit only has an effect if we are in
   2731         * thread mode; other bits can be updated by any privileged code.
   2732         * write_v7m_control_spsel() deals with updating the SPSEL bit in
    2733         * env->v7m.control, so we only need to update the others.
    2734         * For v7M, we must just ignore explicit writes to SPSEL in handler
    2735         * mode; for v8M the write is permitted but will have no effect.
    2736         * All these bits are write-ignored from non-privileged code,
   2737         * except for SFPA.
   2738         */
   2739        if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
   2740                           !arm_v7m_is_handler_mode(env))) {
   2741            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
   2742        }
   2743        if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
   2744            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
   2745            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
   2746        }
   2747        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
   2748            /*
   2749             * SFPA is RAZ/WI from NS or if no FPU.
   2750             * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
   2751             * Both are stored in the S bank.
   2752             */
   2753            if (env->v7m.secure) {
   2754                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
   2755                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
   2756            }
   2757            if (cur_el > 0 &&
   2758                (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
   2759                 extract32(env->v7m.nsacr, 10, 1))) {
   2760                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
   2761                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
   2762            }
   2763        }
   2764        break;
   2765    default:
   2766    bad_reg:
   2767        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
   2768                                       " register %d\n", reg);
   2769        return;
   2770    }
   2771}
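
        /*
         * Illustrative sketch (not upstream code): how the maskreg argument
         * of the MSR helper above is laid out. The translator passes bits
         * [11..0] of the insn, with the pseudocode mask in bits [11..10] and
         * SYSm in bits [7..0]; e.g. "MSR PRIMASK, r0" arrives as 0x810. The
         * function name is hypothetical.
         */
        static inline uint32_t v7m_msr_maskreg_example(uint32_t mask,
                                                       uint32_t sysm)
        {
            /* pack pseudocode mask [11..10] and SYSm [7..0] into bits [11..0] */
            return ((mask & 3) << 10) | (sysm & 0xff);
        }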
   2772
   2773uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
   2774{
   2775    /* Implement the TT instruction. op is bits [7:6] of the insn. */
   2776    bool forceunpriv = op & 1;
   2777    bool alt = op & 2;
   2778    V8M_SAttributes sattrs = {};
   2779    uint32_t tt_resp;
   2780    bool r, rw, nsr, nsrw, mrvalid;
   2781    int prot;
   2782    ARMMMUFaultInfo fi = {};
   2783    MemTxAttrs attrs = {};
   2784    hwaddr phys_addr;
   2785    ARMMMUIdx mmu_idx;
   2786    uint32_t mregion;
   2787    bool targetpriv;
   2788    bool targetsec = env->v7m.secure;
   2789    bool is_subpage;
   2790
   2791    /*
    2792     * Work out which security state and privilege level we're
    2793     * interested in...
   2794     */
   2795    if (alt) {
   2796        targetsec = !targetsec;
   2797    }
   2798
   2799    if (forceunpriv) {
   2800        targetpriv = false;
   2801    } else {
   2802        targetpriv = arm_v7m_is_handler_mode(env) ||
   2803            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
   2804    }
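
            /*
             * Example (illustrative): TTT (op = 1) probes as unprivileged
             * even from Handler mode, TTA (op = 2) probes the other
             * security state, and TTAT (op = 3) does both.
             */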
   2805
   2806    /* ...and then figure out which MMU index this is */
   2807    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
   2808
   2809    /*
    2810     * For our purposes the MPU and SAU don't care about the access
    2811     * type, except that we must not claim to be an insn fetch, so we
    2812     * arbitrarily call this a read.
   2813     */
   2814
   2815    /*
   2816     * MPU region info only available for privileged or if
   2817     * inspecting the other MPU state.
   2818     */
   2819    if (arm_current_el(env) != 0 || alt) {
   2820        /* We can ignore the return value as prot is always set */
   2821        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
   2822                          &phys_addr, &attrs, &prot, &is_subpage,
   2823                          &fi, &mregion);
   2824        if (mregion == -1) {
   2825            mrvalid = false;
   2826            mregion = 0;
   2827        } else {
   2828            mrvalid = true;
   2829        }
   2830        r = prot & PAGE_READ;
   2831        rw = prot & PAGE_WRITE;
   2832    } else {
   2833        r = false;
   2834        rw = false;
   2835        mrvalid = false;
   2836        mregion = 0;
   2837    }
   2838
   2839    if (env->v7m.secure) {
   2840        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
   2841        nsr = sattrs.ns && r;
   2842        nsrw = sattrs.ns && rw;
   2843    } else {
   2844        sattrs.ns = true;
   2845        nsr = false;
   2846        nsrw = false;
   2847    }
   2848
   2849    tt_resp = (sattrs.iregion << 24) |
   2850        (sattrs.irvalid << 23) |
   2851        ((!sattrs.ns) << 22) |
   2852        (nsrw << 21) |
   2853        (nsr << 20) |
   2854        (rw << 19) |
   2855        (r << 18) |
   2856        (sattrs.srvalid << 17) |
   2857        (mrvalid << 16) |
   2858        (sattrs.sregion << 8) |
   2859        mregion;
   2860
   2861    return tt_resp;
   2862}
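
        /*
         * Illustrative sketch (not upstream code): unpacking fields from the
         * TT response word assembled above, using the same layout (MREGION
         * [7:0], SREGION [15:8], MRVALID [16], SRVALID [17], R [18], RW [19],
         * NSR [20], NSRW [21], S [22], IRVALID [23], IREGION [31:24]). The
         * helper name is hypothetical.
         */
        static inline bool v7m_tt_resp_mrvalid_example(uint32_t tt_resp)
        {
            return extract32(tt_resp, 16, 1); /* MRVALID: MREGION is valid */
        }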
   2863
   2864#endif /* !CONFIG_USER_ONLY */
   2865
   2866ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
   2867                              bool secstate, bool priv, bool negpri)
   2868{
   2869    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
   2870
   2871    if (priv) {
   2872        mmu_idx |= ARM_MMU_IDX_M_PRIV;
   2873    }
   2874
   2875    if (negpri) {
   2876        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
   2877    }
   2878
   2879    if (secstate) {
   2880        mmu_idx |= ARM_MMU_IDX_M_S;
   2881    }
   2882
   2883    return mmu_idx;
   2884}
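
        /*
         * Usage sketch (illustrative, hypothetical name): the M-profile MMU
         * index is just the base index ORed with per-state flag bits, so a
         * privileged Secure context with no negative-priority execution is:
         */
        static inline ARMMMUIdx v7m_mmu_idx_secure_priv_example(CPUARMState *env)
        {
            /* == ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV | ARM_MMU_IDX_M_S */
            return arm_v7m_mmu_idx_all(env, true, true, false);
        }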
   2885
   2886ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
   2887                                                bool secstate, bool priv)
   2888{
   2889    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);
   2890
   2891    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
   2892}
   2893
   2894/* Return the MMU index for a v7M CPU in the specified security state */
   2895ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
   2896{
   2897    bool priv = arm_v7m_is_handler_mode(env) ||
    2898        !(env->v7m.control[secstate] & R_V7M_CONTROL_NPRIV_MASK);
   2899
   2900    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
   2901}
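
        /*
         * Usage sketch (illustrative, hypothetical name): combining the
         * helpers above to get the MMU index for the CPU's current
         * security state.
         */
        static inline ARMMMUIdx v7m_current_mmu_idx_example(CPUARMState *env)
        {
            return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
        }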