cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

helper_regs.c (8816B)


/*
 *  PowerPC emulation special registers manipulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "helper_regs.h"

/* Swap temporary saved registers with GPRs */
void hreg_swap_gpr_tgpr(CPUPPCState *env)
{
    target_ulong tmp;

    tmp = env->gpr[0];
    env->gpr[0] = env->tgpr[0];
    env->tgpr[0] = tmp;
    tmp = env->gpr[1];
    env->gpr[1] = env->tgpr[1];
    env->tgpr[1] = tmp;
    tmp = env->gpr[2];
    env->gpr[2] = env->tgpr[2];
    env->tgpr[2] = tmp;
    tmp = env->gpr[3];
    env->gpr[3] = env->tgpr[3];
    env->tgpr[3] = tmp;
}
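
/*
 * Editorial note: on 602/603-class cores, MSR[TGPR] switches GPR0-GPR3
 * to a shadow set used by the software TLB-miss handlers.
 * hreg_store_msr() below performs this swap whenever a stored MSR value
 * toggles that bit, so env->gpr[] always reflects the live set.
 */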

static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
{
    target_ulong msr = env->msr;
    uint32_t ppc_flags = env->flags;
    uint32_t hflags = 0;
    uint32_t msr_mask;

    /* Some bits come straight across from MSR. */
    QEMU_BUILD_BUG_ON(MSR_LE != HFLAGS_LE);
    QEMU_BUILD_BUG_ON(MSR_PR != HFLAGS_PR);
    QEMU_BUILD_BUG_ON(MSR_DR != HFLAGS_DR);
    QEMU_BUILD_BUG_ON(MSR_FP != HFLAGS_FP);
    msr_mask = ((1 << MSR_LE) | (1 << MSR_PR) |
                (1 << MSR_DR) | (1 << MSR_FP));

    if (ppc_flags & POWERPC_FLAG_HID0_LE) {
        /*
         * Note that MSR_LE is not set in env->msr_mask for this cpu,
         * and so will never be set in msr.
         */
        uint32_t le = extract32(env->spr[SPR_HID0], 3, 1);
        hflags |= le << MSR_LE;
    }

    if (ppc_flags & POWERPC_FLAG_DE) {
        target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0];
        if (dbcr0 & DBCR0_ICMP) {
            hflags |= 1 << HFLAGS_SE;
        }
        if (dbcr0 & DBCR0_BRT) {
            hflags |= 1 << HFLAGS_BE;
        }
    } else {
        if (ppc_flags & POWERPC_FLAG_BE) {
            QEMU_BUILD_BUG_ON(MSR_BE != HFLAGS_BE);
            msr_mask |= 1 << MSR_BE;
        }
        if (ppc_flags & POWERPC_FLAG_SE) {
            QEMU_BUILD_BUG_ON(MSR_SE != HFLAGS_SE);
            msr_mask |= 1 << MSR_SE;
        }
    }

    if (msr_is_64bit(env, msr)) {
        hflags |= 1 << HFLAGS_64;
    }
    if ((ppc_flags & POWERPC_FLAG_SPE) && (msr & (1 << MSR_SPE))) {
        hflags |= 1 << HFLAGS_SPE;
    }
    if (ppc_flags & POWERPC_FLAG_VRE) {
        QEMU_BUILD_BUG_ON(MSR_VR != HFLAGS_VR);
        msr_mask |= 1 << MSR_VR;
    }
    if (ppc_flags & POWERPC_FLAG_VSX) {
        QEMU_BUILD_BUG_ON(MSR_VSX != HFLAGS_VSX);
        msr_mask |= 1 << MSR_VSX;
    }
    if ((ppc_flags & POWERPC_FLAG_TM) && (msr & (1ull << MSR_TM))) {
        hflags |= 1 << HFLAGS_TM;
    }
    if (env->spr[SPR_LPCR] & LPCR_GTSE) {
        hflags |= 1 << HFLAGS_GTSE;
    }
    if (env->spr[SPR_LPCR] & LPCR_HR) {
        hflags |= 1 << HFLAGS_HR;
    }

#ifndef CONFIG_USER_ONLY
    if (!env->has_hv_mode || (msr & (1ull << MSR_HV))) {
        hflags |= 1 << HFLAGS_HV;
    }

    /*
     * This is our encoding for server processors. The architecture
     * specifies that there is no such thing as userspace with
     * translation off, however it appears that MacOS does it and some
     * 32-bit CPUs support it. Weird...
     *
     *   0 = Guest User space virtual mode
     *   1 = Guest Kernel space virtual mode
     *   2 = Guest User space real mode
     *   3 = Guest Kernel space real mode
     *   4 = HV User space virtual mode
     *   5 = HV Kernel space virtual mode
     *   6 = HV User space real mode
     *   7 = HV Kernel space real mode
     *
     * For BookE, we need 8 MMU modes as follows:
     *
     *  0 = AS 0 HV User space
     *  1 = AS 0 HV Kernel space
     *  2 = AS 1 HV User space
     *  3 = AS 1 HV Kernel space
     *  4 = AS 0 Guest User space
     *  5 = AS 0 Guest Kernel space
     *  6 = AS 1 Guest User space
     *  7 = AS 1 Guest Kernel space
     */
    unsigned immu_idx, dmmu_idx;
    dmmu_idx = msr & (1 << MSR_PR) ? 0 : 1;
    if (env->mmu_model & POWERPC_MMU_BOOKE) {
        dmmu_idx |= msr & (1 << MSR_GS) ? 4 : 0;
        immu_idx = dmmu_idx;
        immu_idx |= msr & (1 << MSR_IS) ? 2 : 0;
        dmmu_idx |= msr & (1 << MSR_DS) ? 2 : 0;
    } else {
        dmmu_idx |= msr & (1ull << MSR_HV) ? 4 : 0;
        immu_idx = dmmu_idx;
        immu_idx |= msr & (1 << MSR_IR) ? 0 : 2;
        dmmu_idx |= msr & (1 << MSR_DR) ? 0 : 2;
    }
    hflags |= immu_idx << HFLAGS_IMMU_IDX;
    hflags |= dmmu_idx << HFLAGS_DMMU_IDX;
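
    /*
     * Editorial worked example for the server (book3s) path above:
     * a hypervisor kernel with translation enabled has PR=0 (-> 1),
     * HV=1 (-> |= 4) and IR=DR=1 (-> no |= 2), giving
     * immu_idx = dmmu_idx = 5, i.e. "HV Kernel space virtual mode"
     * in the table above.
     */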
#endif

    return hflags | (msr & msr_mask);
}

void hreg_compute_hflags(CPUPPCState *env)
{
    env->hflags = hreg_compute_hflags_value(env);
}

#ifdef CONFIG_DEBUG_TCG
void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *flags)
{
    uint32_t hflags_current = env->hflags;
    uint32_t hflags_rebuilt;

    *pc = env->nip;
    *cs_base = 0;
    *flags = hflags_current;

    hflags_rebuilt = hreg_compute_hflags_value(env);
    if (unlikely(hflags_current != hflags_rebuilt)) {
        cpu_abort(env_cpu(env),
                  "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
                  hflags_current, hflags_rebuilt);
    }
}
#endif

void cpu_interrupt_exittb(CPUState *cs)
{
    /*
     * We don't need to worry about translation blocks
     * when running with KVM.
     */
    if (kvm_enabled()) {
        return;
    }

    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
        qemu_mutex_unlock_iothread();
    } else {
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
    }
}
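
/*
 * Editorial note: hreg_store_msr() below routes MSR writes through this
 * helper whenever IR/DR (or GS on BookE) change, forcing TCG to leave
 * the current translation block, since translated code bakes those
 * translation-mode assumptions into hflags.
 */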

int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
{
    int excp;
#if !defined(CONFIG_USER_ONLY)
    CPUState *cs = env_cpu(env);
#endif

    excp = 0;
    value &= env->msr_mask;
#if !defined(CONFIG_USER_ONLY)
    /* Neither mtmsr nor guest state can alter HV */
    if (!alter_hv || !(env->msr & MSR_HVB)) {
        value &= ~MSR_HVB;
        value |= env->msr & MSR_HVB;
    }
    if (((value >> MSR_IR) & 1) != msr_ir ||
        ((value >> MSR_DR) & 1) != msr_dr) {
        cpu_interrupt_exittb(cs);
    }
    if ((env->mmu_model & POWERPC_MMU_BOOKE) &&
        ((value >> MSR_GS) & 1) != msr_gs) {
        cpu_interrupt_exittb(cs);
    }
    if (unlikely((env->flags & POWERPC_FLAG_TGPR) &&
                 ((value ^ env->msr) & (1 << MSR_TGPR)))) {
        /* Swap temporary saved registers with GPRs */
        hreg_swap_gpr_tgpr(env);
    }
    if (unlikely(((value >> MSR_EP) & 1) != msr_ep)) {
        /* Change the exception prefix on PowerPC 601 */
        env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
    }
    /*
     * If PR=1 then EE, IR and DR must be 1
     *
     * Note: We only enforce this on 64-bit server processors.
     * It appears that:
     * - 32-bit implementations support PR=1 and EE/DR/IR=0 and MacOS
     *   exploits it.
     * - 64-bit embedded implementations do not need any operation to be
     *   performed when PR is set.
     */
    if (is_book3s_arch2x(env) && ((value >> MSR_PR) & 1)) {
        value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR);
    }
#endif
    env->msr = value;
    hreg_compute_hflags(env);
#if !defined(CONFIG_USER_ONLY)
    if (unlikely(msr_pow == 1)) {
        if (!env->pending_interrupts && (*env->check_pow)(env)) {
            cs->halted = 1;
            excp = EXCP_HALTED;
        }
    }
#endif

    return excp;
}
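
/*
 * Editorial sketch (not part of this file): a TCG mtmsr helper is
 * expected to consume the EXCP_HALTED return roughly as follows,
 * shown only to illustrate the contract:
 *
 *     void helper_store_msr(CPUPPCState *env, target_ulong val)
 *     {
 *         uint32_t excp = hreg_store_msr(env, val, 0);
 *
 *         if (excp != 0) {
 *             cpu_interrupt_exittb(env_cpu(env));
 *             raise_exception(env, excp);
 *         }
 *     }
 */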

#ifdef CONFIG_SOFTMMU
void store_40x_sler(CPUPPCState *env, uint32_t val)
{
    /* XXX: TO BE FIXED */
    if (val != 0x00000000) {
        cpu_abort(env_cpu(env),
                  "Little-endian regions are not supported yet\n");
    }
    env->spr[SPR_405_SLER] = val;
}
#endif /* CONFIG_SOFTMMU */

#ifndef CONFIG_USER_ONLY
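/*
 * Editorial note: TLB invalidations on ppc are deferred. Instructions
 * that invalidate translations set TLB_NEED_LOCAL_FLUSH and/or
 * TLB_NEED_GLOBAL_FLUSH in env->tlb_need_flush, and this helper performs
 * the queued flush at a synchronization point. In upstream QEMU, the TCG
 * helpers helper_check_tlb_flush_local()/_global() are thin wrappers
 * passing global=false/true (assumption based on upstream sources).
 */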
void check_tlb_flush(CPUPPCState *env, bool global)
{
    CPUState *cs = env_cpu(env);

    /* Handle global flushes first */
    if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
        env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
        env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
        tlb_flush_all_cpus_synced(cs);
        return;
    }

    /* Then handle local ones */
    if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) {
        env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
        tlb_flush(cs);
    }
}
    295#endif