cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

helper.c (9610B)


/*
 *  MicroBlaze helper routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/log.h"

#if defined(CONFIG_USER_ONLY)

bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    cs->exception_index = 0xaa;
    cpu_loop_exit_restore(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu,
                                    MMUAccessType access_type)
{
    if (access_type == MMU_INST_FETCH) {
        return !cpu->ns_axi_ip;
    } else {
        return !cpu->ns_axi_dp;
    }
}

bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    MicroBlazeMMULookup lu;
    unsigned int hit;
    int prot;
    MemTxAttrs attrs = {};

    attrs.secure = mb_cpu_access_is_secure(cpu, access_type);

    if (mmu_idx == MMU_NOMMU_IDX) {
        /* MMU disabled or not available.  */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_BITS;
        tlb_set_page_with_attrs(cs, address, address, attrs, prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }

    hit = mmu_translate(cpu, &lu, address, access_type, mmu_idx);
    if (likely(hit)) {
        uint32_t vaddr = address & TARGET_PAGE_MASK;
        uint32_t paddr = lu.paddr + vaddr - lu.vaddr;

        qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
                      mmu_idx, vaddr, paddr, lu.prot);
        tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, lu.prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }

    /* TLB miss.  */
    if (probe) {
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
                  mmu_idx, address);

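    /*
     * Report the fault back to the guest.  Per the MicroBlaze reference
     * manual, ESR[EC] values 16/17 are the data/instruction storage
     * (protection) exceptions and 18/19 the data/instruction TLB miss
     * exceptions; bit 10 is the ESR "S" bit, set when the faulting
     * access was a store.
     */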
    env->ear = address;
    switch (lu.err) {
    case ERR_PROT:
        env->esr = access_type == MMU_INST_FETCH ? 17 : 16;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    case ERR_MISS:
        env->esr = access_type == MMU_INST_FETCH ? 19 : 18;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    default:
        abort();
    }

    if (cs->exception_index == EXCP_MMU) {
        cpu_abort(cs, "recursive faults\n");
    }

    /* TLB miss.  */
    cs->exception_index = EXCP_MMU;
    cpu_loop_exit_restore(cs, retaddr);
}

void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t t, msr = mb_cpu_read_msr(env);
    bool set_esr;

    /* IMM flag cannot propagate across a branch and into the dslot.  */
    assert((env->iflags & (D_FLAG | IMM_FLAG)) != (D_FLAG | IMM_FLAG));
    /* BIMM flag cannot be set without D_FLAG. */
    assert((env->iflags & (D_FLAG | BIMM_FLAG)) != BIMM_FLAG);
    /* RTI flags are private to translate. */
    assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));

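    /*
     * Dispatch on the pending exception type.  The vector offsets and
     * link registers below follow the MicroBlaze layout: interrupts
     * vector to base_vectors + 0x10 with the return address in r14,
     * hardware breaks to + 0x18 using r16, and hardware exceptions
     * (including MMU faults) to + 0x20 using r17.
     */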
    switch (cs->exception_index) {
    case EXCP_HW_EXCP:
        if (!(cpu->cfg.pvr_regs[0] & PVR0_USE_EXC_MASK)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Exception raised on system without exceptions!\n");
            return;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "INT: HWE at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);

        /* Exception breaks branch + dslot sequence?  */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->regs[17] = env->pc + 4;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_MMU:
        qemu_log_mask(CPU_LOG_INT,
                      "INT: MMU at pc=%08x msr=%08x "
                      "ear=%" PRIx64 " iflags=%x\n",
                      env->pc, msr, env->ear, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
            /* Reexecute the branch. */
            env->regs[17] = env->pc - (env->iflags & BIMM_FLAG ? 8 : 4);
        } else if (env->iflags & IMM_FLAG) {
            /* Reexecute the imm. */
            env->regs[17] = env->pc - 4;
        } else {
            env->regs[17] = env->pc;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_IRQ:
        assert(!(msr & (MSR_EIP | MSR_BIP)));
        assert(msr & MSR_IE);
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: DEV at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Disable interrupts.  */
        msr &= ~MSR_IE;
        env->regs[14] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x10;
        break;

    case EXCP_HW_BREAK:
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: BRK at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Break in progress. */
        msr |= MSR_BIP;
        env->regs[16] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x18;
        break;

    default:
        cpu_abort(cs, "unhandled exception type=%d\n", cs->exception_index);
        /* not reached */
    }

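    /*
     * MSR_VMS/MSR_UMS sit one bit above MSR_VM/MSR_UM, so shifting the
     * current VM/UM state left by one copies it into the "save" bits
     * before both fields are cleared for the handler.
     */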
    /* Save previous mode, disable mmu, disable user-mode. */
    t = (msr & (MSR_VM | MSR_UM)) << 1;
    msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
    msr |= t;
    mb_cpu_write_msr(env, msr);

    env->res_addr = RES_ADDR_NONE;
    env->iflags = 0;

    if (!set_esr) {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x\n", env->pc, msr);
    } else if (env->esr & D_FLAG) {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x esr=%04x btr=%08x\n",
                      env->pc, msr, env->esr, env->btr);
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x esr=%04x\n",
                      env->pc, msr, env->esr);
    }
}

hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                        MemTxAttrs *attrs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    target_ulong vaddr, paddr = 0;
    MicroBlazeMMULookup lu;
    int mmu_idx = cpu_mmu_index(env, false);
    unsigned int hit;

    /* Caller doesn't initialize */
    *attrs = (MemTxAttrs) {};
    attrs->secure = mb_cpu_access_is_secure(cpu, MMU_DATA_LOAD);

    if (mmu_idx != MMU_NOMMU_IDX) {
        hit = mmu_translate(cpu, &lu, addr, 0, 0);
        if (hit) {
            vaddr = addr & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;
        } else {
            paddr = 0; /* ???.  */
        }
    } else {
        paddr = addr & TARGET_PAGE_MASK;
    }

    return paddr;
}

bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;

    if ((interrupt_request & CPU_INTERRUPT_HARD)
        && (env->msr & MSR_IE)
        && !(env->msr & (MSR_EIP | MSR_BIP))
        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
        cs->exception_index = EXCP_IRQ;
        mb_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

#endif /* !CONFIG_USER_ONLY */

void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    uint32_t esr, iflags;

    /* Recover the pc and iflags from the corresponding insn_start.  */
    cpu_restore_state(cs, retaddr, true);
    iflags = cpu->env.iflags;

    qemu_log_mask(CPU_LOG_INT,
                  "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
                  (target_ulong)addr, cpu->env.pc, iflags);

    esr = ESR_EC_UNALIGNED_DATA;
    if (likely(iflags & ESR_ESS_FLAG)) {
        esr |= iflags & ESR_ESS_MASK;
    } else {
        qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
    }

    cpu->env.ear = addr;
    cpu->env.esr = esr;
    cs->exception_index = EXCP_HW_EXCP;
    cpu_loop_exit(cs);
}