cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

mem_helper.c (12374B)


/*
 *  HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

#ifdef CONFIG_USER_ONLY
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);

    /* ??? Test between data page fault and data memory protection trap,
       which would affect si_code.  */
    cs->exception_index = EXCP_DMP;
    cpu->env.cr[CR_IOR] = address;
    cpu_loop_exit_restore(cs, retaddr);
}
#else
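/* System emulation: the architectural TLB is modelled as the small
   software array env->tlb; lookups walk that array and the results are
   mirrored into QEMU's own TLB on demand.  */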
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                      ent->va_b, ent->va_e, ent->pa);
            return ent;
        }
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
{
    CPUState *cs = env_cpu(env);
    unsigned i, n = 1 << (2 * ent->page_size);
    uint64_t addr = ent->va_b;

    trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);

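    /* The entry may cover more than one QEMU page: 4^page_size base
       pages in all.  */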
    for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
        /* Do not flush MMU_PHYS_IDX.  */
        tlb_flush_page_by_mmuidx(cs, addr, 0xf);
    }

    memset(ent, 0, sizeof(*ent));
    ent->va_b = -1;
}

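/* Pick a victim slot for a new entry, cycling round-robin through the
   array via tlb_last.  */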
static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    hppa_tlb_entry *ent;
    uint32_t i = env->tlb_last;

    env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
    ent = &env->tlb[i];

    hppa_flush_tlb_ent(env, ent);
    return ent;
}

int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot;
    hppa_tlb_entry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);

    /* Map TLB access_rights field to QEMU protection.  */
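    /* Lower mmu_idx values are more privileged (kernel is index 0, user
       index 3 in this target), so comparing mmu_idx against the PL
       fields implements the privilege checks; execute further requires
       the privilege level to fall within the [ar_pl2, ar_pl1] window.  */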
    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if ((env->psw & PSW_P) && ent->access_id) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

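/* Debug translation hook (gdbstub/monitor): translate ADDR without
   raising guest-visible exceptions.  */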
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

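/* Called by the softmmu core on a QEMU TLB miss: translate the access
   and either install the mapping or raise the architectural fault.  */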
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure.  Raise the indicated exception.  */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);
    /* Success!  Store the translation into the QEMU TLB.  */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way.  */
    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            if (ent->entry_valid) {
                hppa_flush_tlb_ent(env, ent);
            }
            if (!empty) {
                empty = ent;
            }
        }
    }

    /* If we didn't see an empty entry, evict one.  */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already.  */
    empty->va_b = addr & TARGET_PAGE_MASK;
    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
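    /* Bits [24:5] of the operand register hold the physical page
       number.  */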
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (unlikely(ent == NULL)) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }

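    /* Unpack the protection word: access id, U bit, access rights
       (PL2, PL1, type) and the B, D and T flag bits.  */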
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
   synchronous across all processors.  */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (ent && ent->entry_valid) {
        hppa_flush_tlb_ent(env, ent);
    }
}

void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

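    /* Queue the purge on every other vcpu, then run it on this one as
       "safe work" so all flushes complete before execution resumes.  */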
    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    memset(env->tlb, 0, sizeof(env->tlb));
    tlb_flush_by_mmuidx(env_cpu(env), 0xf);
}

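/* The protection ids in CR_PID* changed; if they are currently being
   checked (PSW_P set), previously cached translations may no longer be
   valid.  */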
void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    if (env->psw & PSW_P) {
        tlb_flush_by_mmuidx(env_cpu(env), 0xf);
    }
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

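/* Load Physical Address: translate ADDR for the guest, raising a
   non-access TLB miss fault if no translation exists.  */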
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}
#endif /* CONFIG_USER_ONLY */