cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

op_helper.c (28824B)


/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0487C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    /*
     * restore_state_to_opc() will set env->exception.syndrome, so
     * we must restore CPU state here before setting the syndrome
     * the caller passed us, and cannot use cpu_loop_exit_restore().
     */
    cpu_restore_state(cs, ra, true);
    raise_exception(env, excp, syndrome, target_el);
}

uint64_t HELPER(neon_tbl)(CPUARMState *env, uint32_t desc,
                          uint64_t ireg, uint64_t def)
{
    uint64_t tmp, val = 0;
    uint32_t maxindex = ((desc & 3) + 1) * 8;
    uint32_t base_reg = desc >> 2;
    uint32_t shift, index, reg;

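    /*
     * desc packs the table geometry: bits [1:0] hold the number of
     * 64-bit table registers minus one (so maxindex is 8, 16, 24 or
     * 32 bytes) and the upper bits select the first D register. Each
     * result byte is looked up individually; out-of-range indexes take
     * the corresponding byte of 'def', which lets this one helper
     * serve both VTBL (callers pass zero) and VTBX (callers pass the
     * old destination).
     */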
    for (shift = 0; shift < 64; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            reg = base_reg + (index >> 3);
            tmp = *aa32_vfp_dreg(env, reg);
            tmp = ((tmp >> ((index & 7) << 3)) & 0xff) << shift;
        } else {
            tmp = def & (0xffull << shift);
        }
        val |= tmp;
    }
    return val;
}

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use raise_exception_ra() so
         * that cpu_restore_state() will sort them out.
         */
        raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
    }
}

uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
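
/*
 * Worked example of the saturation above: add_saturate(0x7fffffff, 1)
 * overflows positive, so QF is set and the result becomes
 * ~(0 ^ SIGNBIT) = 0x7fffffff (INT32_MAX); symmetrically,
 * add_saturate(0x80000000, -1) saturates to 0x80000000 (INT32_MIN),
 * since (int32_t)a >> 31 is -1 there.
 */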

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

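    /*
     * val >> shift keeps only the bits that do not fit in a signed
     * (shift + 1)-bit result: anything other than 0 or -1 there means
     * val lies outside [-2^shift, 2^shift - 1] and must be clamped.
     */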
    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
    arm_rebuild_hflags(env);
}

void HELPER(check_bxj_trap)(CPUARMState *env, uint32_t rm)
{
    /*
     * Only called if in NS EL0 or EL1 for a BXJ for a v7A CPU;
     * check if HSTR.TJDBX means we need to trap to EL2.
     */
    if (env->cp15.hstr_el2 & HSTR_TJDBX) {
        /*
         * We know the condition code check passed, so take the IMPDEF
         * choice to always report CV=1 COND 0xe
         */
        uint32_t syn = syn_bxjtrap(1, 0xe, rm);
        raise_exception_ra(env, EXCP_HYP_TRAP, syn, 2, GETPC());
    }
}

#ifndef CONFIG_USER_ONLY
/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 are at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
     * No need for an ARM_FEATURE check: if HCR_EL2 doesn't exist, the
     * bits will be zero, indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
#endif

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
#ifdef CONFIG_USER_ONLY
    /*
     * WFI in the user-mode emulator is technically permitted but not
     * something any real-world code would do. AArch64 Linux kernels
     * trap it via SCTLR_EL1.nTWI and make it an (expensive) NOP;
     * AArch32 kernels don't trap it so it will delay a bit.
     * For QEMU, make it NOP here, because trying to raise EXCP_HLT
     * would trigger an abort.
     */
    return;
#else
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
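        /*
         * Translated code has already advanced the PC past the WFI,
         * but the trap must report the WFI itself as the preferred
         * return address, so wind the PC back before raising the
         * exception.
         */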
        if (env->aarch64) {
            env->pc -= insn_len;
        } else {
            env->regs[15] -= insn_len;
        }

        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
#endif
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/* Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    /*
     * Other kinds of architectural debug exception are ignored if
     * they target an exception level below the current one (in QEMU
     * this is checked by arm_generate_debug_exceptions()). Breakpoint
     * instructions are special because they always generate an exception
     * to somewhere: if they can't go to the configured debug exception
     * level they are taken to the current exception level.
     */
    if (debug_el < cur_el) {
        debug_el = cur_el;
    }
    raise_exception(env, EXCP_BKPT, syndrome, debug_el);
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~CPSR_EXEC;
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
    /* TODO: Not all cpsr bits are relevant to hflags.  */
    arm_rebuild_hflags(env);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    uint32_t mask;

    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
    cpsr_write(env, val, mask, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);
    arm_rebuild_hflags(env);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}

/* Access to user mode registers from privileged modes.  */
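/*
 * r13 and r14 are read from and written to the USR/SYS bank; r8-r12
 * are only banked away when the current mode is FIQ, otherwise the
 * live env->regs[] already hold the user-mode values.
 */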
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

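    /*
     * regno uses the same encoding as the MSR/MRS (banked) translation
     * code: 8-12 are the banked GP registers, 13/14 are SP/LR, 16 is
     * the SPSR and 17 is ELR_Hyp.
     */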
    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    /*
     * Check for an EL2 trap due to HSTR_EL2. We expect EL0 accesses
     * to sysregs that are not accessible at EL0 to have UNDEF-ed
     * already.
     */
    if (!is_a64(env) && arm_current_el(env) < 2 && ri->cp == 15 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        uint32_t mask = 1 << ri->crn;

        if (ri->type & ARM_CP_64BIT) {
            mask = 1 << ri->crm;
        }

        /* T4 and T14 are RES0 */
        mask &= ~((1 << 4) | (1 << 14));

        if (env->cp15.hstr_el2 & mask) {
            target_el = 2;
            goto except;
        }
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 is
         * a bug in the access function.
         */
        assert(arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

except:
    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

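/*
 * Registers marked ARM_CP_IO touch device or timer state, so their
 * read/write functions must run with the iothread lock (BQL) held;
 * other registers only touch CPU state and can skip the locking.
 */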
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     *  -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     *  -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     *  -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     *  extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     *    - PSCI conduit is SMC but we don't have a valid PSCI call,
     *    - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
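    /*
     * shift1 is the raw amount from the register's low byte; a nonzero
     * multiple of 32 leaves x unchanged but must still set C to bit 31,
     * which is why the raw and mod-32 amounts are tracked separately.
     */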
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}

void HELPER(probe_access)(CPUARMState *env, target_ulong ptr,
                          uint32_t access_type, uint32_t mmu_idx,
                          uint32_t size)
{
    uint32_t in_page = -((uint32_t)ptr | TARGET_PAGE_MASK);
    uintptr_t ra = GETPC();

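    /*
     * -(ptr | TARGET_PAGE_MASK) is the number of bytes from ptr to the
     * end of its page, so an access that fits entirely within the page
     * is probed with one call and anything larger is split at the page
     * boundary.
     */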
    if (likely(size <= in_page)) {
        probe_access(env, ptr, size, access_type, mmu_idx, ra);
    } else {
        probe_access(env, ptr, in_page, access_type, mmu_idx, ra);
        probe_access(env, ptr + in_page, size - in_page,
                     access_type, mmu_idx, ra);
    }
}