cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

op_helper.c (7884B)


/*
 *  MIPS emulation helpers for qemu.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/memop.h"
#include "fpu_helper.h"

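/*
 * Reverse the bit order within each byte of 'v'; used by the
 * BITSWAP/DBITSWAP helpers below.  Illustrative example (not from the
 * original source): bitswap(0x0102) == 0x8040, since every byte is
 * bit-reversed independently.
 */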
static inline target_ulong bitswap(target_ulong v)
{
    v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) |
              ((v & (target_ulong)0x5555555555555555ULL) << 1);
    v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) |
              ((v & (target_ulong)0x3333333333333333ULL) << 2);
    v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) |
              ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4);
    return v;
}

#ifdef TARGET_MIPS64
target_ulong helper_dbitswap(target_ulong rt)
{
    return bitswap(rt);
}
#endif

target_ulong helper_bitswap(target_ulong rt)
{
    return (int32_t)bitswap(rt);
}

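/*
 * Helper for the ROTX operation: 'rs' is first replicated into both
 * 32-bit halves of a 64-bit working value so that bits copied down from
 * above wrap around like a rotate.  The result is then assembled by five
 * passes that conditionally copy bits down by 16, 8, 4, 2 and 1
 * positions, choosing between 'shift' and 'shiftx' per bit position
 * ('stripe' complements the chosen amount for half of the positions in
 * the first pass); the low 32 bits of the result are sign-extended.
 */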
target_ulong helper_rotx(target_ulong rs, uint32_t shift, uint32_t shiftx,
                        uint32_t stripe)
{
    int i;
    uint64_t tmp0 = ((uint64_t)rs) << 32 | ((uint64_t)rs & 0xffffffff);
    uint64_t tmp1 = tmp0;
    for (i = 0; i <= 46; i++) {
        int s;
        if (i & 0x8) {
            s = shift;
        } else {
            s = shiftx;
        }

        if (stripe != 0 && !(i & 0x4)) {
            s = ~s;
        }
        if (s & 0x10) {
            if (tmp0 & (1LL << (i + 16))) {
                tmp1 |= 1LL << i;
            } else {
                tmp1 &= ~(1LL << i);
            }
        }
    }

    uint64_t tmp2 = tmp1;
    for (i = 0; i <= 38; i++) {
        int s;
        if (i & 0x4) {
            s = shift;
        } else {
            s = shiftx;
        }

        if (s & 0x8) {
            if (tmp1 & (1LL << (i + 8))) {
                tmp2 |= 1LL << i;
            } else {
                tmp2 &= ~(1LL << i);
            }
        }
    }

    uint64_t tmp3 = tmp2;
    for (i = 0; i <= 34; i++) {
        int s;
        if (i & 0x2) {
            s = shift;
        } else {
            s = shiftx;
        }
        if (s & 0x4) {
            if (tmp2 & (1LL << (i + 4))) {
                tmp3 |= 1LL << i;
            } else {
                tmp3 &= ~(1LL << i);
            }
        }
    }

    uint64_t tmp4 = tmp3;
    for (i = 0; i <= 32; i++) {
        int s;
        if (i & 0x1) {
            s = shift;
        } else {
            s = shiftx;
        }
        if (s & 0x2) {
            if (tmp3 & (1LL << (i + 2))) {
                tmp4 |= 1LL << i;
            } else {
                tmp4 &= ~(1LL << i);
            }
        }
    }

    uint64_t tmp5 = tmp4;
    for (i = 0; i <= 31; i++) {
        int s;
        s = shift;
        if (s & 0x1) {
            if (tmp4 & (1LL << (i + 1))) {
                tmp5 |= 1LL << i;
            } else {
                tmp5 &= ~(1LL << i);
            }
        }
    }

    return (int64_t)(int32_t)(uint32_t)tmp5;
}

void helper_fork(target_ulong arg1, target_ulong arg2)
{
    /*
     * arg1 = rt, arg2 = rs
     * TODO: store to TC register
     */
}

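/*
 * YIELD (MT ASE).  No scheduling policy is implemented: a negative
 * qualifier other than -2 raises a Thread exception when yield-scheduler
 * intervention (YSI) is enabled and the TC permits it, and a positive
 * qualifier always raises a Thread exception since qualifier inputs are
 * not implemented.  CP0.YQMask is returned in all cases.
 */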
target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
{
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                do_raise_exception(env, EXCP_THREAD, GETPC());
            }
        }
    } else if (arg1 == 0) {
        if (0) {
            /* TODO: TC underflow */
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            do_raise_exception(env, EXCP_THREAD, GETPC());
        } else {
            /* TODO: Deallocate TC */
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        do_raise_exception(env, EXCP_THREAD, GETPC());
    }
    return env->CP0_YQMask;
}

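/*
 * RDHWR helpers.  Reading a hardware register from code without CP0
 * privilege is permitted only if the corresponding bit of CP0.HWREna is
 * set; otherwise a Reserved Instruction exception is raised.
 */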
static inline void check_hwrena(CPUMIPSState *env, int reg, uintptr_t pc)
{
    if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << reg))) {
        return;
    }
    do_raise_exception(env, EXCP_RI, pc);
}

target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
{
    check_hwrena(env, 0, GETPC());
    return env->CP0_EBase & 0x3ff;
}

target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
{
    check_hwrena(env, 1, GETPC());
    return env->SYNCI_Step;
}

target_ulong helper_rdhwr_cc(CPUMIPSState *env)
{
    check_hwrena(env, 2, GETPC());
#ifdef CONFIG_USER_ONLY
    return env->CP0_Count;
#else
    return (int32_t)cpu_mips_get_count(env);
#endif
}

target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
{
    check_hwrena(env, 3, GETPC());
    return env->CCRes;
}

target_ulong helper_rdhwr_performance(CPUMIPSState *env)
{
    check_hwrena(env, 4, GETPC());
    return env->CP0_Performance0;
}

target_ulong helper_rdhwr_xnp(CPUMIPSState *env)
{
    check_hwrena(env, 5, GETPC());
    return (env->CP0_Config5 >> CP0C5_XNP) & 1;
}

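/*
 * Minimal emulation of PMON firmware console calls.  The function code
 * selects character input (always reported as unavailable by returning
 * -1 in $v0), character output of $a0 via printf, or printing the string
 * whose guest address in $a0 is used directly as a host pointer.
 */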
void helper_pmon(CPUMIPSState *env, int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0) {
            env->active_tc.gpr[2] = -1;
        }
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}

#if !defined(CONFIG_USER_ONLY)

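/*
 * Unaligned-access hook invoked by the memory access slow path: record
 * the faulting address in CP0.BadVAddr (unless in debug mode) and raise
 * an address error exception, AdES for stores and AdEL for loads and
 * instruction fetches.
 */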
void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                  MMUAccessType access_type,
                                  int mmu_idx, uintptr_t retaddr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int error_code = 0;
    int excp;

    if (!(env->hflags & MIPS_HFLAG_DM)) {
        env->CP0_BadVAddr = addr;
    }

    if (access_type == MMU_DATA_STORE) {
        excp = EXCP_AdES;
    } else {
        excp = EXCP_AdEL;
        if (access_type == MMU_INST_FETCH) {
            error_code |= EXCP_INST_NOTAVAIL;
        }
    }

    do_raise_exception_err(env, excp, error_code, retaddr);
}

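/*
 * Bus-error hook invoked when a memory transaction fails: raise an
 * instruction bus error for fetches, or a data bus error unless the CPU
 * model is configured to ignore data aborts.
 */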
void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                    vaddr addr, unsigned size,
                                    MMUAccessType access_type,
                                    int mmu_idx, MemTxAttrs attrs,
                                    MemTxResult response, uintptr_t retaddr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(cpu);
    CPUMIPSState *env = &cpu->env;

    if (access_type == MMU_INST_FETCH) {
        do_raise_exception(env, EXCP_IBE, retaddr);
    } else if (!mcc->no_data_aborts) {
        do_raise_exception(env, EXCP_DBE, retaddr);
    }
}
#endif /* !CONFIG_USER_ONLY */