cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

cpu_loop.c (16396B)


/*
 *  qemu user cpu loop
 *
 *  Copyright (c) 2003-2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu.h"
#include "user-internals.h"
#include "elf.h"
#include "cpu_loop-common.h"
#include "signal-common.h"
#include "semihosting/common-semi.h"

#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap32(__x);                         \
        }                                               \
        put_user_u32(__x, (gaddr));                     \
    })

#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap16(__x);                         \
        }                                               \
        put_user_u16(__x, (gaddr));                     \
    })

/* Commpage handling -- there is no commpage for AArch64 */

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 * Input:
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    uint32_t addr, cpsr;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying.  */
    start_exclusive();
    cpsr = cpsr_read(env);
    addr = env->regs[2];

    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];
        goto segv;
    };

    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];
        goto segv;
    };

    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;
        goto segv;
    }

    if (val == oldval) {
        val = newval;

        if (put_user_u64(val, addr)) {
            env->exception.vaddress = addr;
            goto segv;
        };

        env->regs[0] = 0;
        cpsr |= CPSR_C;
    } else {
        env->regs[0] = -1;
        cpsr &= ~CPSR_C;
    }
    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    end_exclusive();
    return;

segv:
    end_exclusive();
    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses. */
    /* XXX: check env->error_code */
    force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR,
                    env->exception.vaddress);
}
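
/*
 * Editorial sketch, not part of the original file: the guest-side view of
 * the helper emulated above, following the kernel document referenced in
 * the comment (Documentation/arm/kernel_user_helpers.txt). 32-bit guest
 * code reaches arm_kernel_cmpxchg64_helper() by calling the fixed commpage
 * address 0xffff0f60:
 *
 *   typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
 *                                     const int64_t *newval,
 *                                     volatile int64_t *ptr);
 *   #define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
 *
 *   // returns 0 (and sets the C flag) if *ptr was atomically replaced
 *   int failed = __kuser_cmpxchg64(&expected, &desired, ptr);
 */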

/* Handle a jump to the kernel code page.  */
static int
do_kernel_trap(CPUARMState *env)
{
    uint32_t addr;
    uint32_t cpsr;
    uint32_t val;

    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP.  */
        break;
    case 0xffff0fc0: /* __kernel_cmpxchg */
         /* XXX: This only works between threads, not between processes.
            It's probably possible to implement this with native host
            operations. However things like ldrex/strex are much harder so
            there's not much point trying.  */
        start_exclusive();
        cpsr = cpsr_read(env);
        addr = env->regs[2];
        /* FIXME: This should SEGV if the access fails.  */
        if (get_user_u32(val, addr))
            val = ~env->regs[0];
        if (val == env->regs[0]) {
            val = env->regs[1];
            /* FIXME: Check for segfaults.  */
            put_user_u32(val, addr);
            env->regs[0] = 0;
            cpsr |= CPSR_C;
        } else {
            env->regs[0] = -1;
            cpsr &= ~CPSR_C;
        }
        cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
        end_exclusive();
        break;
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
        break;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);
        break;

    default:
        return 1;
    }
    /* Jump back to the caller.  */
    addr = env->regs[14];
    if (addr & 1) {
        env->thumb = 1;
        addr &= ~1;
    }
    env->regs[15] = addr;

    return 0;
}
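
/*
 * Editorial sketch, not part of the original file: the commpage entry
 * points dispatched in do_kernel_trap() are the 32-bit kuser helpers from
 * Documentation/arm/kernel_user_helpers.txt, which guest code calls
 * through fixed addresses, e.g.:
 *
 *   typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *                                   volatile int *ptr);
 *   #define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 *   typedef void *(__kuser_get_tls_t)(void);
 *   #define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 * A successful __kuser_cmpxchg() returns 0 and sets the C flag, matching
 * the env->regs[0] / CPSR_C updates above.
 */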

static bool insn_is_linux_bkpt(uint32_t opcode, bool is_thumb)
{
    /*
     * Return true if this insn is one of the three magic UDF insns
     * which the kernel treats as breakpoint insns.
     */
    if (!is_thumb) {
        return (opcode & 0x0fffffff) == 0x07f001f0;
    } else {
        /*
         * Note that we get the two halves of the 32-bit T32 insn
         * in the opposite order to the value the kernel uses in
         * its undef_hook struct.
         */
        return ((opcode & 0xffff) == 0xde01) || (opcode == 0xa000f7f0);
    }
}
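
/*
 * Editorial note, not part of the original file: concretely, the patterns
 * checked in insn_is_linux_bkpt() correspond to the kernel's breakpoint
 * encodings 0xe7f001f0 (A32, with the condition field masked off), 0xde01
 * (16-bit Thumb) and 0xf7f0 0xa000 (32-bit Thumb, whose halfwords we read
 * swapped as 0xa000f7f0).
 */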

static bool emulate_arm_fpa11(CPUARMState *env, uint32_t opcode)
{
    TaskState *ts = env_cpu(env)->opaque;
    int rc = EmulateAll(opcode, &ts->fpa, env);
    int raise, enabled;

    if (rc == 0) {
        /* Illegal instruction */
        return false;
    }
    if (rc > 0) {
        /* Everything ok. */
        env->regs[15] += 4;
        return true;
    }

    /* FP exception */
    rc = -rc;
    raise = 0;

    /* Translate softfloat flags to FPSR flags */
    if (rc & float_flag_invalid) {
        raise |= BIT_IOC;
    }
    if (rc & float_flag_divbyzero) {
        raise |= BIT_DZC;
    }
    if (rc & float_flag_overflow) {
        raise |= BIT_OFC;
    }
    if (rc & float_flag_underflow) {
        raise |= BIT_UFC;
    }
    if (rc & float_flag_inexact) {
        raise |= BIT_IXC;
    }

    /* Accumulate unenabled exceptions */
    enabled = ts->fpa.fpsr >> 16;
    ts->fpa.fpsr |= raise & ~enabled;

    if (raise & enabled) {
        /*
         * The kernel's nwfpe emulator does not pass a real si_code.
         * It merely uses send_sig(SIGFPE, current, 1), which results in
         * __send_signal() filling out SI_KERNEL with pid and uid 0 (under
         * the "SEND_SIG_PRIV" case). That's what our force_sig() does.
         */
        force_sig(TARGET_SIGFPE);
    } else {
        env->regs[15] += 4;
    }
    return true;
}
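
/*
 * Editorial note, not part of the original file: the FPA11 FPSR used above
 * keeps the cumulative exception flags in its low bits and the matching
 * trap-enable bits 16 positions higher, so "enabled" is obtained by
 * shifting fpsr right by 16, and a raised-but-unenabled exception is only
 * accumulated rather than delivered as SIGFPE.
 */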

void cpu_loop(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);
    int trapnr;
    unsigned int n, insn;
    abi_ulong ret;

    for(;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch(trapnr) {
        case EXCP_UDEF:
        case EXCP_NOCP:
        case EXCP_INVSTATE:
            {
                uint32_t opcode;

                /* we handle the FPU emulation here, as Linux */
                /* we get the opcode */
                /* FIXME - what to do if get_user() fails? */
                get_user_code_u32(opcode, env->regs[15], env);

                /*
                 * The Linux kernel treats some UDF patterns specially
                 * to use as breakpoints (instead of the architectural
                 * bkpt insn). These should trigger a SIGTRAP rather
                 * than SIGILL.
                 */
                if (insn_is_linux_bkpt(opcode, env->thumb)) {
                    goto excp_debug;
                }

                if (!env->thumb && emulate_arm_fpa11(env, opcode)) {
                    break;
                }

                force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPN,
                                env->regs[15]);
            }
            break;
        case EXCP_SWI:
            {
                env->eabi = 1;
                /* system call */
                if (env->thumb) {
                    /* Thumb is always EABI style with syscall number in r7 */
                    n = env->regs[7];
                } else {
                    /*
                     * Equivalent of kernel CONFIG_OABI_COMPAT: read the
                     * Arm SVC insn to extract the immediate, which is the
                     * syscall number in OABI.
                     */
                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u32(insn, env->regs[15] - 4, env);
                    n = insn & 0xffffff;
                    if (n == 0) {
                        /* zero immediate: EABI, syscall number in r7 */
                        n = env->regs[7];
                    } else {
                        /*
                         * This XOR matches the kernel code: an immediate
                         * in the valid range (0x900000 .. 0x9fffff) is
                         * converted into the correct EABI-style syscall
                         * number; invalid immediates end up as values
                         * > 0xfffff and are handled below as out-of-range.
                         */
                        n ^= ARM_SYSCALL_BASE;
                        env->eabi = 0;
                    }
                }
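
                /*
                 * Editorial worked example, not part of the original file:
                 * an OABI "swi 0x900001" (exit) reaches this point as
                 * n == 0x900001 ^ 0x900000 == 1, the EABI-style number,
                 * while an immediate outside 0x900000..0x9fffff XORs to a
                 * value above 0xfffff and is rejected as out-of-range in
                 * the dispatch below.
                 */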

                if (n > ARM_NR_BASE) {
                    switch (n) {
                    case ARM_NR_cacheflush:
                        /* nop */
                        break;
                    case ARM_NR_set_tls:
                        cpu_set_tls(env, env->regs[0]);
                        env->regs[0] = 0;
                        break;
                    case ARM_NR_breakpoint:
                        env->regs[15] -= env->thumb ? 2 : 4;
                        goto excp_debug;
                    case ARM_NR_get_tls:
                        env->regs[0] = cpu_get_tls(env);
                        break;
                    default:
                        if (n < 0xf0800) {
                            /*
                             * Syscalls 0xf0000..0xf07ff (or 0x9f0000..
                             * 0x9f07ff in OABI numbering) are defined
                             * to return -ENOSYS rather than raising
                             * SIGILL. Note that we have already
                             * removed the 0x900000 prefix.
                             */
                            qemu_log_mask(LOG_UNIMP,
                                "qemu: Unsupported ARM syscall: 0x%x\n",
                                          n);
                            env->regs[0] = -TARGET_ENOSYS;
                        } else {
                            /*
                             * Otherwise SIGILL. This includes any SWI with
                             * immediate not originally 0x9fxxxx, because
                             * of the earlier XOR.
                             * Like the real kernel, we report the addr of the
                             * SWI in the siginfo si_addr but leave the PC
                             * pointing at the insn after the SWI.
                             */
                            abi_ulong faultaddr = env->regs[15];
                            faultaddr -= env->thumb ? 2 : 4;
                            force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLTRP,
                                            faultaddr);
                        }
                        break;
                    }
                } else {
                    ret = do_syscall(env,
                                     n,
                                     env->regs[0],
                                     env->regs[1],
                                     env->regs[2],
                                     env->regs[3],
                                     env->regs[4],
                                     env->regs[5],
                                     0, 0);
                    if (ret == -TARGET_ERESTARTSYS) {
                        env->regs[15] -= env->thumb ? 2 : 4;
                    } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                        env->regs[0] = ret;
                    }
                }
            }
            break;
        case EXCP_SEMIHOST:
            env->regs[0] = do_common_semihosting(cs);
            env->regs[15] += env->thumb ? 2 : 4;
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            /* XXX: check env->error_code */
            force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR,
                            env->exception.vaddress);
            break;
        case EXCP_DEBUG:
        case EXCP_BKPT:
        excp_debug:
            force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->regs[15]);
            break;
        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env))
              goto error;
            break;
        case EXCP_YIELD:
            /* nothing to do here for user-mode, just resume guest code */
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            break;
        default:
        error:
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
            abort();
        }
        process_pending_signals(env);
    }
}

void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    struct image_info *info = ts->info;
    int i;

    cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
               CPSRWriteByInstr);
    for(i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
    }
#ifdef TARGET_WORDS_BIGENDIAN
    /* Enable BE8.  */
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
        env->uncached_cpsr |= CPSR_E;
        env->cp15.sctlr_el[1] |= SCTLR_E0E;
    } else {
        env->cp15.sctlr_el[1] |= SCTLR_B;
    }
    arm_rebuild_hflags(env);
#endif

    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call.  */
    ts->heap_limit = 0;
}