cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

tlb_helper.c (7385B)


/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el, 0,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome.  */
        syn |= template_syn;
    }
    return syn;
}

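/*
 * Build the fault status register and syndrome for the fault described
 * by fi, then raise the corresponding prefetch or data abort at the
 * target exception level. Does not return.
 */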
static void QEMU_NORETURN arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                                            MMUAccessType access_type,
                                            int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }
    same_el = (arm_current_el(env) == target_el);

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /*
         * LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
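        /* DFSR bit 11 is WnR (Write not Read): set it for stores on v6+. */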
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

#if !defined(CONFIG_USER_ONLY)

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

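    /* Classify the external abort from the bus response; it feeds the syndrome EA bit. */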
    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

#endif /* !defined(CONFIG_USER_ONLY) */

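/*
 * arm_cpu_tlb_fill: look up the guest virtual address and, if the mapping
 * exists, install it into the softmmu TLB. Returns true on success; on a
 * failed lookup it returns false when probing, otherwise it delivers the
 * fault and does not return.
 */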
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

#ifdef CONFIG_USER_ONLY
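    /*
     * For user-mode emulation the host mapping stands in for the guest
     * page tables: a page that is mapped but still faulted is reported
     * as a permission fault, an unmapped one as a translation fault,
     * in both cases at the final (third) lookup level.
     */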
    int flags = page_get_flags(useronly_clean_ptr(address));
    if (flags & PAGE_VALID) {
        fi.type = ARMFault_Permission;
    } else {
        fi.type = ARMFault_Translation;
    }
    fi.level = 3;

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);
    arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
#else
    hwaddr phys_addr;
    target_ulong page_size;
    int prot, ret;
    MemTxAttrs attrs = {};
    ARMCacheAttrs cacheattrs = {};

    /*
     * Walk the page table and (if the mapping exists) add the page
     * to the TLB.  On success, return true.  Otherwise, if probing,
     * return false.  Otherwise populate fsr with ARM DFSR/IFSR fault
     * register format, and signal the fault.
     */
    ret = get_phys_addr(&cpu->env, address, access_type,
                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                        &phys_addr, &attrs, &prot, &page_size,
                        &fi, &cacheattrs);
    if (likely(!ret)) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (page_size >= TARGET_PAGE_SIZE) {
            phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }
        /* Notice and record tagged memory. */
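        /* Attribute 0xf0 is the MTE Tagged Normal write-back encoding. */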
        if (cpu_isar_feature(aa64_mte, cpu) && cacheattrs.attrs == 0xf0) {
            arm_tlb_mte_tagged(&attrs) = true;
        }

        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr, true);
        arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
    }
#endif
}