cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

mmu-hash64.c (34688B)


      1/*
      2 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
      3 *
      4 *  Copyright (c) 2003-2007 Jocelyn Mayer
      5 *  Copyright (c) 2013 David Gibson, IBM Corporation
      6 *
      7 * This library is free software; you can redistribute it and/or
      8 * modify it under the terms of the GNU Lesser General Public
      9 * License as published by the Free Software Foundation; either
     10 * version 2.1 of the License, or (at your option) any later version.
     11 *
     12 * This library is distributed in the hope that it will be useful,
     13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
     14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     15 * Lesser General Public License for more details.
     16 *
     17 * You should have received a copy of the GNU Lesser General Public
     18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
     19 */
     20#include "qemu/osdep.h"
     21#include "qemu/units.h"
     22#include "cpu.h"
     23#include "exec/exec-all.h"
     24#include "qemu/error-report.h"
     25#include "qemu/qemu-print.h"
     26#include "sysemu/hw_accel.h"
     27#include "kvm_ppc.h"
     28#include "mmu-hash64.h"
     29#include "exec/log.h"
     30#include "hw/hw.h"
     31#include "internal.h"
     32#include "mmu-book3s-v3.h"
     33#include "helper_regs.h"
     34
     35#ifdef CONFIG_TCG
     36#include "exec/helper-proto.h"
     37#endif
     38
     39/* #define DEBUG_SLB */
     40
     41#ifdef DEBUG_SLB
     42#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
     43#else
     44#  define LOG_SLB(...) do { } while (0)
     45#endif
     46
     47/*
     48 * SLB handling
     49 */
     50
     51static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
     52{
     53    CPUPPCState *env = &cpu->env;
     54    uint64_t esid_256M, esid_1T;
     55    int n;
     56
     57    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);
     58
     59    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
     60    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;
     61
     62    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
     63        ppc_slb_t *slb = &env->slb[n];
     64
     65        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
     66                    PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
     67        /*
     68         * We check for 1T matches on all MMUs here - if the MMU
     69         * doesn't have 1T segment support, we will have prevented 1T
     70         * entries from being inserted in the slbmte code.
     71         */
     72        if (((slb->esid == esid_256M) &&
     73             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
     74            || ((slb->esid == esid_1T) &&
     75                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
     76            return slb;
     77        }
     78    }
     79
     80    return NULL;
     81}
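        /*
         * The two candidate ESIDs above correspond to the two architected
         * segment sizes: a 256 MiB segment spans 2^28 bytes, so
         * SEGMENT_MASK_256M keeps only the effective-address bits above
         * bit 27, while a 1 TiB segment spans 2^40 bytes.  The SLB_VSID_B
         * field of the matching entry records which size the entry uses,
         * which is why both the ESID and the B field must agree for a hit.
         */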
     82
     83void dump_slb(PowerPCCPU *cpu)
     84{
     85    CPUPPCState *env = &cpu->env;
     86    int i;
     87    uint64_t slbe, slbv;
     88
     89    cpu_synchronize_state(CPU(cpu));
     90
     91    qemu_printf("SLB\tESID\t\t\tVSID\n");
     92    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
     93        slbe = env->slb[i].esid;
     94        slbv = env->slb[i].vsid;
     95        if (slbe == 0 && slbv == 0) {
     96            continue;
     97        }
     98        qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
     99                    i, slbe, slbv);
    100    }
    101}
    102
    103#ifdef CONFIG_TCG
    104void helper_slbia(CPUPPCState *env, uint32_t ih)
    105{
    106    PowerPCCPU *cpu = env_archcpu(env);
    107    int starting_entry;
    108    int n;
    109
    110    /*
     111     * slbia must always flush the whole TLB (equivalent to the ERAT in the Power
    112     * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
    113     * can overwrite a valid SLB without flushing its lookaside information.
    114     *
     115     * It would be possible to keep the TLB in sync with the SLB by flushing
    116     * when a valid entry is overwritten by slbmte, and therefore slbia would
    117     * not have to flush unless it evicts a valid SLB entry. However it is
    118     * expected that slbmte is more common than slbia, and slbia is usually
    119     * going to evict valid SLB entries, so that tradeoff is unlikely to be a
    120     * good one.
    121     *
    122     * ISA v2.05 introduced IH field with values 0,1,2,6. These all invalidate
    123     * the same SLB entries (everything but entry 0), but differ in what
    124     * "lookaside information" is invalidated. TCG can ignore this and flush
    125     * everything.
    126     *
    127     * ISA v3.0 introduced additional values 3,4,7, which change what SLBs are
    128     * invalidated.
    129     */
    130
    131    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    132
    133    starting_entry = 1; /* default for IH=0,1,2,6 */
    134
    135    if (env->mmu_model == POWERPC_MMU_3_00) {
    136        switch (ih) {
    137        case 0x7:
    138            /* invalidate no SLBs, but all lookaside information */
    139            return;
    140
    141        case 0x3:
    142        case 0x4:
    143            /* also considers SLB entry 0 */
    144            starting_entry = 0;
    145            break;
    146
    147        case 0x5:
    148            /* treat undefined values as ih==0, and warn */
    149            qemu_log_mask(LOG_GUEST_ERROR,
    150                          "slbia undefined IH field %u.\n", ih);
    151            break;
    152
    153        default:
    154            /* 0,1,2,6 */
    155            break;
    156        }
    157    }
    158
    159    for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) {
    160        ppc_slb_t *slb = &env->slb[n];
    161
    162        if (!(slb->esid & SLB_ESID_V)) {
    163            continue;
    164        }
    165        if (env->mmu_model == POWERPC_MMU_3_00) {
    166            if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) {
    167                /* preserves entries with a class value of 0 */
    168                continue;
    169            }
    170        }
    171
    172        slb->esid &= ~SLB_ESID_V;
    173    }
    174}
    175
    176static void __helper_slbie(CPUPPCState *env, target_ulong addr,
    177                           target_ulong global)
    178{
    179    PowerPCCPU *cpu = env_archcpu(env);
    180    ppc_slb_t *slb;
    181
    182    slb = slb_lookup(cpu, addr);
    183    if (!slb) {
    184        return;
    185    }
    186
    187    if (slb->esid & SLB_ESID_V) {
    188        slb->esid &= ~SLB_ESID_V;
    189
    190        /*
     191         * XXX: given that segment sizes are 256 MiB or 1 TiB,
    192         *      and we still don't have a tlb_flush_mask(env, n, mask)
    193         *      in QEMU, we just invalidate all TLBs
    194         */
    195        env->tlb_need_flush |=
    196            (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
    197    }
    198}
    199
    200void helper_slbie(CPUPPCState *env, target_ulong addr)
    201{
    202    __helper_slbie(env, addr, false);
    203}
    204
    205void helper_slbieg(CPUPPCState *env, target_ulong addr)
    206{
    207    __helper_slbie(env, addr, true);
    208}
    209#endif
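        /*
         * Note on the two flavours above: slbie only needs to invalidate
         * lookaside state on this thread, while slbieg (an ISA v3.0
         * addition) also targets other threads, which is modelled here by
         * requesting TLB_NEED_GLOBAL_FLUSH instead of TLB_NEED_LOCAL_FLUSH.
         */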
    210
    211int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
    212                  target_ulong esid, target_ulong vsid)
    213{
    214    CPUPPCState *env = &cpu->env;
    215    ppc_slb_t *slb = &env->slb[slot];
    216    const PPCHash64SegmentPageSizes *sps = NULL;
    217    int i;
    218
    219    if (slot >= cpu->hash64_opts->slb_size) {
    220        return -1; /* Bad slot number */
    221    }
    222    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
    223        return -1; /* Reserved bits set */
    224    }
    225    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
    226        return -1; /* Bad segment size */
    227    }
    228    if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
    229        return -1; /* 1T segment on MMU that doesn't support it */
    230    }
    231
    232    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
    233        const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];
    234
    235        if (!sps1->page_shift) {
    236            break;
    237        }
    238
    239        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
    240            sps = sps1;
    241            break;
    242        }
    243    }
    244
    245    if (!sps) {
    246        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
    247                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
    248                     slot, esid, vsid);
    249        return -1;
    250    }
    251
    252    slb->esid = esid;
    253    slb->vsid = vsid;
    254    slb->sps = sps;
    255
    256    LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
    257            " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
    258            slb->esid, slb->vsid);
    259
    260    return 0;
    261}
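        /*
         * The loop above compares the SLB entry's L||LP bits
         * (SLB_VSID_LLP_MASK) against the segment page sizes this CPU model
         * advertises in hash64_opts->sps, so an unsupported base page size
         * is rejected just like a reserved bit.  helper_store_slb() below
         * feeds this from the slbmte operands: the low 12 bits of RB select
         * the slot, the remaining RB bits carry the ESID and valid bit, and
         * RS supplies the whole VSID doubleword.
         */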
    262
    263#ifdef CONFIG_TCG
    264static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
    265                             target_ulong *rt)
    266{
    267    CPUPPCState *env = &cpu->env;
    268    int slot = rb & 0xfff;
    269    ppc_slb_t *slb = &env->slb[slot];
    270
    271    if (slot >= cpu->hash64_opts->slb_size) {
    272        return -1;
    273    }
    274
    275    *rt = slb->esid;
    276    return 0;
    277}
    278
    279static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
    280                             target_ulong *rt)
    281{
    282    CPUPPCState *env = &cpu->env;
    283    int slot = rb & 0xfff;
    284    ppc_slb_t *slb = &env->slb[slot];
    285
    286    if (slot >= cpu->hash64_opts->slb_size) {
    287        return -1;
    288    }
    289
    290    *rt = slb->vsid;
    291    return 0;
    292}
    293
    294static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
    295                             target_ulong *rt)
    296{
    297    CPUPPCState *env = &cpu->env;
    298    ppc_slb_t *slb;
    299
    300    if (!msr_is_64bit(env, env->msr)) {
    301        rb &= 0xffffffff;
    302    }
    303    slb = slb_lookup(cpu, rb);
    304    if (slb == NULL) {
    305        *rt = (target_ulong)-1ul;
    306    } else {
    307        *rt = slb->vsid;
    308    }
    309    return 0;
    310}
    311
    312void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
    313{
    314    PowerPCCPU *cpu = env_archcpu(env);
    315
    316    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
    317        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
    318                               POWERPC_EXCP_INVAL, GETPC());
    319    }
    320}
    321
    322target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
    323{
    324    PowerPCCPU *cpu = env_archcpu(env);
    325    target_ulong rt = 0;
    326
    327    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
    328        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
    329                               POWERPC_EXCP_INVAL, GETPC());
    330    }
    331    return rt;
    332}
    333
    334target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
    335{
    336    PowerPCCPU *cpu = env_archcpu(env);
    337    target_ulong rt = 0;
    338
    339    if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
    340        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
    341                               POWERPC_EXCP_INVAL, GETPC());
    342    }
    343    return rt;
    344}
    345
    346target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
    347{
    348    PowerPCCPU *cpu = env_archcpu(env);
    349    target_ulong rt = 0;
    350
    351    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
    352        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
    353                               POWERPC_EXCP_INVAL, GETPC());
    354    }
    355    return rt;
    356}
    357#endif
    358
    359/* Check No-Execute or Guarded Storage */
    360static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
    361                                              ppc_hash_pte64_t pte)
    362{
    363    /* Exec permissions CANNOT take away read or write permissions */
    364    return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
    365            PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    366}
    367
    368/* Check Basic Storage Protection */
    369static int ppc_hash64_pte_prot(int mmu_idx,
    370                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
    371{
    372    unsigned pp, key;
    373    /*
    374     * Some pp bit combinations have undefined behaviour, so default
    375     * to no access in those cases
    376     */
    377    int prot = 0;
    378
    379    key = !!(mmuidx_pr(mmu_idx) ? (slb->vsid & SLB_VSID_KP)
    380             : (slb->vsid & SLB_VSID_KS));
    381    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);
    382
    383    if (key == 0) {
    384        switch (pp) {
    385        case 0x0:
    386        case 0x1:
    387        case 0x2:
    388            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    389            break;
    390
    391        case 0x3:
    392        case 0x6:
    393            prot = PAGE_READ | PAGE_EXEC;
    394            break;
    395        }
    396    } else {
    397        switch (pp) {
    398        case 0x0:
    399        case 0x6:
    400            break;
    401
    402        case 0x1:
    403        case 0x3:
    404            prot = PAGE_READ | PAGE_EXEC;
    405            break;
    406
    407        case 0x2:
    408            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    409            break;
    410        }
    411    }
    412
    413    return prot;
    414}
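        /*
         * Summary of the PP decoding above, with the key chosen from KS/KP
         * depending on whether the access is problem state:
         *
         *   key = 0:  pp 0,1,2 -> read/write/exec    pp 3,6 -> read/exec
         *   key = 1:  pp 2     -> read/write/exec    pp 1,3 -> read/exec
         *             pp 0,6   -> no access
         *
         * Execute permission granted here is still subject to the
         * no-execute/guard bits and the IAMR checks below.
         */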
    415
    416/* Check the instruction access permissions specified in the IAMR */
    417static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
    418{
    419    CPUPPCState *env = &cpu->env;
    420    int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;
    421
    422    /*
    423     * An instruction fetch is permitted if the IAMR bit is 0.
    424     * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
    425     * can only take away EXEC permissions not READ or WRITE permissions.
     426     * If the bit is clear, return PAGE_READ | PAGE_WRITE | PAGE_EXEC since
    427     * EXEC permissions are allowed.
    428     */
    429    return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
    430                               PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    431}
    432
    433static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
    434{
    435    CPUPPCState *env = &cpu->env;
    436    int key, amrbits;
    437    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    438
    439    /* Only recent MMUs implement Virtual Page Class Key Protection */
    440    if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
    441        return prot;
    442    }
    443
    444    key = HPTE64_R_KEY(pte.pte1);
    445    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;
    446
    447    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    448    /*         env->spr[SPR_AMR]); */
    449
    450    /*
    451     * A store is permitted if the AMR bit is 0. Remove write
    452     * protection if it is set.
    453     */
    454    if (amrbits & 0x2) {
    455        prot &= ~PAGE_WRITE;
    456    }
    457    /*
    458     * A load is permitted if the AMR bit is 0. Remove read
    459     * protection if it is set.
    460     */
    461    if (amrbits & 0x1) {
    462        prot &= ~PAGE_READ;
    463    }
    464
    465    switch (env->mmu_model) {
    466    /*
     467     * MMU version 2.07 and later support the IAMR.
     468     * Check whether the IAMR allows the instruction access: the returned
     469     * mask lacks PAGE_EXEC if it does not (so the bit is cleared from prot
     470     * here), and includes PAGE_EXEC if it does (leaving prot unchanged).
    471     */
    472    case POWERPC_MMU_2_07:
    473    case POWERPC_MMU_3_00:
    474        prot &= ppc_hash64_iamr_prot(cpu, key);
    475        break;
    476    default:
    477        break;
    478    }
    479
    480    return prot;
    481}
    482
    483const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
    484                                             hwaddr ptex, int n)
    485{
    486    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
    487    hwaddr base;
    488    hwaddr plen = n * HASH_PTE_SIZE_64;
    489    const ppc_hash_pte64_t *hptes;
    490
    491    if (cpu->vhyp) {
    492        PPCVirtualHypervisorClass *vhc =
    493            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
    494        return vhc->map_hptes(cpu->vhyp, ptex, n);
    495    }
    496    base = ppc_hash64_hpt_base(cpu);
    497
    498    if (!base) {
    499        return NULL;
    500    }
    501
    502    hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
    503                              MEMTXATTRS_UNSPECIFIED);
    504    if (plen < (n * HASH_PTE_SIZE_64)) {
    505        hw_error("%s: Unable to map all requested HPTEs\n", __func__);
    506    }
    507    return hptes;
    508}
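        /*
         * Two backing models for the hashed page table: under a virtual
         * hypervisor (the pseries machine) the HPT is owned by the machine
         * code and reached through the vhyp hooks, whereas on an emulated
         * bare-metal setup it simply lives in guest memory at the base
         * returned by ppc_hash64_hpt_base(), so it is mapped and unmapped
         * with the address_space_map()/address_space_unmap() pair used here
         * and below.
         */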
    509
    510void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
    511                            hwaddr ptex, int n)
    512{
    513    if (cpu->vhyp) {
    514        PPCVirtualHypervisorClass *vhc =
    515            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
    516        vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
    517        return;
    518    }
    519
    520    address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
    521                        false, n * HASH_PTE_SIZE_64);
    522}
    523
    524static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
    525                                uint64_t pte0, uint64_t pte1)
    526{
    527    int i;
    528
    529    if (!(pte0 & HPTE64_V_LARGE)) {
    530        if (sps->page_shift != 12) {
    531            /* 4kiB page in a non 4kiB segment */
    532            return 0;
    533        }
    534        /* Normal 4kiB page */
    535        return 12;
    536    }
    537
    538    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
    539        const PPCHash64PageSize *ps = &sps->enc[i];
    540        uint64_t mask;
    541
    542        if (!ps->page_shift) {
    543            break;
    544        }
    545
    546        if (ps->page_shift == 12) {
    547            /* L bit is set so this can't be a 4kiB page */
    548            continue;
    549        }
    550
    551        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
    552
    553        if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
    554            return ps->page_shift;
    555        }
    556    }
    557
    558    return 0; /* Bad page size encoding */
    559}
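        /*
         * For large pages (HPTE64_V_LARGE set) the actual page size is
         * encoded in the low-order bits of the RPN field of pte1, which is
         * what the loop above masks out and compares with each candidate
         * pte_enc.  In the POWER7 table at the bottom of this file, for
         * example, a 64 KiB page in a 64 KiB segment uses pte_enc 0x1 and a
         * 16 MiB page in a 4 KiB segment uses pte_enc 0x38.
         */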
    560
    561static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
    562{
    563    /* Insert B into pte0 */
    564    *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
    565            ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
    566             (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));
    567
    568    /* Remove B from pte1 */
    569    *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
    570}
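        /*
         * ISA v3.0 moved the segment size (B) field of a hash PTE from the
         * first doubleword into the second one.  The rest of this file works
         * with the pre-v3.0 layout, so the PTEG search below converts each
         * candidate entry back to the old format when a POWER9-style MMU is
         * emulated without a virtual hypervisor.
         */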
    571
    572
    573static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
    574                                     const PPCHash64SegmentPageSizes *sps,
    575                                     target_ulong ptem,
    576                                     ppc_hash_pte64_t *pte, unsigned *pshift)
    577{
    578    int i;
    579    const ppc_hash_pte64_t *pteg;
    580    target_ulong pte0, pte1;
    581    target_ulong ptex;
    582
    583    ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
    584    pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
    585    if (!pteg) {
    586        return -1;
    587    }
    588    for (i = 0; i < HPTES_PER_GROUP; i++) {
    589        pte0 = ppc_hash64_hpte0(cpu, pteg, i);
    590        /*
    591         * pte0 contains the valid bit and must be read before pte1,
    592         * otherwise we might see an old pte1 with a new valid bit and
    593         * thus an inconsistent hpte value
    594         */
    595        smp_rmb();
    596        pte1 = ppc_hash64_hpte1(cpu, pteg, i);
    597
    598        /* Convert format if necessary */
    599        if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
    600            ppc64_v3_new_to_old_hpte(&pte0, &pte1);
    601        }
    602
    603        /* This compares V, B, H (secondary) and the AVPN */
    604        if (HPTE64_V_COMPARE(pte0, ptem)) {
    605            *pshift = hpte_page_shift(sps, pte0, pte1);
    606            /*
    607             * If there is no match, ignore the PTE, it could simply
    608             * be for a different segment size encoding and the
    609             * architecture specifies we should not match. Linux will
    610             * potentially leave behind PTEs for the wrong base page
    611             * size when demoting segments.
    612             */
    613            if (*pshift == 0) {
    614                continue;
    615            }
    616            /*
    617             * We don't do anything with pshift yet as qemu TLB only
    618             * deals with 4K pages anyway
    619             */
    620            pte->pte0 = pte0;
    621            pte->pte1 = pte1;
    622            ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
    623            return ptex + i;
    624        }
    625    }
    626    ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
    627    /*
    628     * We didn't find a valid entry.
    629     */
    630    return -1;
    631}
    632
    633static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
    634                                     ppc_slb_t *slb, target_ulong eaddr,
    635                                     ppc_hash_pte64_t *pte, unsigned *pshift)
    636{
    637    CPUPPCState *env = &cpu->env;
    638    hwaddr hash, ptex;
    639    uint64_t vsid, epnmask, epn, ptem;
    640    const PPCHash64SegmentPageSizes *sps = slb->sps;
    641
    642    /*
    643     * The SLB store path should prevent any bad page size encodings
    644     * getting in there, so:
    645     */
    646    assert(sps);
    647
    648    /* If ISL is set in LPCR we need to clamp the page size to 4K */
    649    if (env->spr[SPR_LPCR] & LPCR_ISL) {
     650        /* We assume that when using TCG, 4k is the first entry of SPS */
    651        sps = &cpu->hash64_opts->sps[0];
    652        assert(sps->page_shift == 12);
    653    }
    654
    655    epnmask = ~((1ULL << sps->page_shift) - 1);
    656
    657    if (slb->vsid & SLB_VSID_B) {
    658        /* 1TB segment */
    659        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
    660        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
    661        hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
    662    } else {
    663        /* 256M segment */
    664        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
    665        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
    666        hash = vsid ^ (epn >> sps->page_shift);
    667    }
    668    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
    669    ptem |= HPTE64_V_VALID;
    670
    671    /* Page address translation */
    672    qemu_log_mask(CPU_LOG_MMU,
    673            "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
    674            " hash " TARGET_FMT_plx "\n",
    675            ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);
    676
    677    /* Primary PTEG lookup */
    678    qemu_log_mask(CPU_LOG_MMU,
    679            "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
    680            " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
    681            " hash=" TARGET_FMT_plx "\n",
    682            ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
    683            vsid, ptem,  hash);
    684    ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);
    685
    686    if (ptex == -1) {
    687        /* Secondary PTEG lookup */
    688        ptem |= HPTE64_V_SECONDARY;
    689        qemu_log_mask(CPU_LOG_MMU,
    690                "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
    691                " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
    692                " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
    693                ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);
    694
    695        ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
    696    }
    697
    698    return ptex;
    699}
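        /*
         * Rough shape of the lookup above for a 256 MiB segment with a
         * 4 KiB base page size:
         *
         *   hash      = VSID ^ (EPN >> 12)
         *   primary   = ( hash & htab_mask) * HPTES_PER_GROUP
         *   secondary = (~hash & htab_mask) * HPTES_PER_GROUP
         *
         * Each PTE group holds HPTES_PER_GROUP (8) entries.  The secondary
         * group is only searched when the primary search fails, and the
         * match value then has HPTE64_V_SECONDARY set so that only entries
         * inserted via the secondary hash can match there.
         */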
    700
    701unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
    702                                          uint64_t pte0, uint64_t pte1)
    703{
    704    int i;
    705
    706    if (!(pte0 & HPTE64_V_LARGE)) {
    707        return 12;
    708    }
    709
    710    /*
    711     * The encodings in env->sps need to be carefully chosen so that
    712     * this gives an unambiguous result.
    713     */
    714    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
    715        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
    716        unsigned shift;
    717
    718        if (!sps->page_shift) {
    719            break;
    720        }
    721
    722        shift = hpte_page_shift(sps, pte0, pte1);
    723        if (shift) {
    724            return shift;
    725        }
    726    }
    727
    728    return 0;
    729}
    730
    731static bool ppc_hash64_use_vrma(CPUPPCState *env)
    732{
    733    switch (env->mmu_model) {
    734    case POWERPC_MMU_3_00:
    735        /*
     736         * ISA v3.0 (POWER9) always uses VRMA; the VPM0 field and RMOR
     737         * register no longer exist.
    738         */
    739        return true;
    740
    741    default:
    742        return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
    743    }
    744}
    745
    746static void ppc_hash64_set_isi(CPUState *cs, int mmu_idx, uint64_t error_code)
    747{
    748    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    749    bool vpm;
    750
    751    if (!mmuidx_real(mmu_idx)) {
    752        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    753    } else {
    754        vpm = ppc_hash64_use_vrma(env);
    755    }
    756    if (vpm && !mmuidx_hv(mmu_idx)) {
    757        cs->exception_index = POWERPC_EXCP_HISI;
    758    } else {
    759        cs->exception_index = POWERPC_EXCP_ISI;
    760    }
    761    env->error_code = error_code;
    762}
    763
    764static void ppc_hash64_set_dsi(CPUState *cs, int mmu_idx, uint64_t dar, uint64_t dsisr)
    765{
    766    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    767    bool vpm;
    768
    769    if (!mmuidx_real(mmu_idx)) {
    770        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    771    } else {
    772        vpm = ppc_hash64_use_vrma(env);
    773    }
    774    if (vpm && !mmuidx_hv(mmu_idx)) {
    775        cs->exception_index = POWERPC_EXCP_HDSI;
    776        env->spr[SPR_HDAR] = dar;
    777        env->spr[SPR_HDSISR] = dsisr;
    778    } else {
    779        cs->exception_index = POWERPC_EXCP_DSI;
    780        env->spr[SPR_DAR] = dar;
    781        env->spr[SPR_DSISR] = dsisr;
     782    }
    783    env->error_code = 0;
    784}
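        /*
         * The two helpers above route a storage fault either to the guest
         * (ISI/DSI) or to the hypervisor (HISI/HDSI): with translation on
         * this is controlled by LPCR[VPM1], with translation off by whether
         * VRMA is in use, and a regular ISI/DSI is used when we are already
         * executing in hypervisor state.  DAR/DSISR (or their HDAR/HDSISR
         * counterparts) are filled in accordingly.
         */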
    785
    786
    787static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
    788{
     789    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 14; /* byte holding R */
    790
    791    if (cpu->vhyp) {
    792        PPCVirtualHypervisorClass *vhc =
    793            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
    794        vhc->hpte_set_r(cpu->vhyp, ptex, pte1);
    795        return;
    796    }
    797    base = ppc_hash64_hpt_base(cpu);
    798
    799
    800    /* The HW performs a non-atomic byte update */
    801    stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
    802}
    803
    804static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
    805{
    806    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15;
    807
    808    if (cpu->vhyp) {
    809        PPCVirtualHypervisorClass *vhc =
    810            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
    811        vhc->hpte_set_c(cpu->vhyp, ptex, pte1);
    812        return;
    813    }
    814    base = ppc_hash64_hpt_base(cpu);
    815
    816    /* The HW performs a non-atomic byte update */
    817    stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
    818}
    819
    820static target_ulong rmls_limit(PowerPCCPU *cpu)
    821{
    822    CPUPPCState *env = &cpu->env;
    823    /*
    824     * In theory the meanings of RMLS values are implementation
    825     * dependent.  In practice, this seems to have been the set from
    826     * POWER4+..POWER8, and RMLS is no longer supported in POWER9.
    827     *
    828     * Unsupported values mean the OS has shot itself in the
    829     * foot. Return a 0-sized RMA in this case, which we expect
    830     * to trigger an immediate DSI or ISI
    831     */
    832    static const target_ulong rma_sizes[16] = {
    833        [0] = 256 * GiB,
    834        [1] = 16 * GiB,
    835        [2] = 1 * GiB,
    836        [3] = 64 * MiB,
    837        [4] = 256 * MiB,
    838        [7] = 128 * MiB,
    839        [8] = 32 * MiB,
    840    };
    841    target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;
    842
    843    return rma_sizes[rmls];
    844}
    845
    846static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
    847{
    848    CPUPPCState *env = &cpu->env;
    849    target_ulong lpcr = env->spr[SPR_LPCR];
    850    uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
    851    target_ulong vsid = SLB_VSID_VRMA | ((vrmasd << 4) & SLB_VSID_LLP_MASK);
    852    int i;
    853
    854    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
    855        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
    856
    857        if (!sps->page_shift) {
    858            break;
    859        }
    860
    861        if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
    862            slb->esid = SLB_ESID_V;
    863            slb->vsid = vsid;
    864            slb->sps = sps;
    865            return 0;
    866        }
    867    }
    868
    869    error_report("Bad page size encoding in LPCR[VRMASD]; LPCR=0x"
    870                 TARGET_FMT_lx, lpcr);
    871
    872    return -1;
    873}
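        /*
         * The VRMA ("virtual real mode area") lets translation-off guest
         * accesses be handled through the hash table anyway: this helper
         * synthesizes a one-off SLB entry whose VSID is the architected
         * SLB_VSID_VRMA value and whose base page size comes from
         * LPCR[VRMASD], and ppc_hash64_xlate() below uses it instead of a
         * real SLB slot when real-mode translation goes the VRMA route.
         */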
    874
    875bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
    876                      hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
    877                      bool guest_visible)
    878{
    879    CPUState *cs = CPU(cpu);
    880    CPUPPCState *env = &cpu->env;
    881    ppc_slb_t vrma_slbe;
    882    ppc_slb_t *slb;
    883    unsigned apshift;
    884    hwaddr ptex;
    885    ppc_hash_pte64_t pte;
    886    int exec_prot, pp_prot, amr_prot, prot;
    887    int need_prot;
    888    hwaddr raddr;
    889
    890    /*
    891     * Note on LPCR usage: 970 uses HID4, but our special variant of
    892     * store_spr copies relevant fields into env->spr[SPR_LPCR].
    893     * Similarly we filter unimplemented bits when storing into LPCR
    894     * depending on the MMU version. This code can thus just use the
    895     * LPCR "as-is".
    896     */
    897
    898    /* 1. Handle real mode accesses */
    899    if (mmuidx_real(mmu_idx)) {
    900        /*
    901         * Translation is supposedly "off", but in real mode the top 4
    902         * effective address bits are (mostly) ignored
    903         */
    904        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
    905
    906        if (cpu->vhyp) {
    907            /*
    908             * In virtual hypervisor mode, there's nothing to do:
    909             *   EA == GPA == qemu guest address
    910             */
    911        } else if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
    912            /* In HV mode, add HRMOR if top EA bit is clear */
    913            if (!(eaddr >> 63)) {
    914                raddr |= env->spr[SPR_HRMOR];
    915            }
    916        } else if (ppc_hash64_use_vrma(env)) {
    917            /* Emulated VRMA mode */
    918            slb = &vrma_slbe;
    919            if (build_vrma_slbe(cpu, slb) != 0) {
    920                /* Invalid VRMA setup, machine check */
    921                if (guest_visible) {
    922                    cs->exception_index = POWERPC_EXCP_MCHECK;
    923                    env->error_code = 0;
    924                }
    925                return false;
    926            }
    927
    928            goto skip_slb_search;
    929        } else {
    930            target_ulong limit = rmls_limit(cpu);
    931
    932            /* Emulated old-style RMO mode, bounds check against RMLS */
    933            if (raddr >= limit) {
    934                if (!guest_visible) {
    935                    return false;
    936                }
    937                switch (access_type) {
    938                case MMU_INST_FETCH:
    939                    ppc_hash64_set_isi(cs, mmu_idx, SRR1_PROTFAULT);
    940                    break;
    941                case MMU_DATA_LOAD:
    942                    ppc_hash64_set_dsi(cs, mmu_idx, eaddr, DSISR_PROTFAULT);
    943                    break;
    944                case MMU_DATA_STORE:
    945                    ppc_hash64_set_dsi(cs, mmu_idx, eaddr,
    946                                       DSISR_PROTFAULT | DSISR_ISSTORE);
    947                    break;
    948                default:
    949                    g_assert_not_reached();
    950                }
    951                return false;
    952            }
    953
    954            raddr |= env->spr[SPR_RMOR];
    955        }
    956
    957        *raddrp = raddr;
    958        *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    959        *psizep = TARGET_PAGE_BITS;
    960        return true;
    961    }
    962
    963    /* 2. Translation is on, so look up the SLB */
    964    slb = slb_lookup(cpu, eaddr);
    965    if (!slb) {
    966        /* No entry found, check if in-memory segment tables are in use */
    967        if (ppc64_use_proc_tbl(cpu)) {
    968            /* TODO - Unsupported */
    969            error_report("Segment Table Support Unimplemented");
    970            exit(1);
    971        }
    972        /* Segment still not found, generate the appropriate interrupt */
    973        if (!guest_visible) {
    974            return false;
    975        }
    976        switch (access_type) {
    977        case MMU_INST_FETCH:
    978            cs->exception_index = POWERPC_EXCP_ISEG;
    979            env->error_code = 0;
    980            break;
    981        case MMU_DATA_LOAD:
    982        case MMU_DATA_STORE:
    983            cs->exception_index = POWERPC_EXCP_DSEG;
    984            env->error_code = 0;
    985            env->spr[SPR_DAR] = eaddr;
    986            break;
    987        default:
    988            g_assert_not_reached();
    989        }
    990        return false;
    991    }
    992
    993 skip_slb_search:
    994
    995    /* 3. Check for segment level no-execute violation */
    996    if (access_type == MMU_INST_FETCH && (slb->vsid & SLB_VSID_N)) {
    997        if (guest_visible) {
    998            ppc_hash64_set_isi(cs, mmu_idx, SRR1_NOEXEC_GUARD);
    999        }
   1000        return false;
   1001    }
   1002
   1003    /* 4. Locate the PTE in the hash table */
   1004    ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
   1005    if (ptex == -1) {
   1006        if (!guest_visible) {
   1007            return false;
   1008        }
   1009        switch (access_type) {
   1010        case MMU_INST_FETCH:
   1011            ppc_hash64_set_isi(cs, mmu_idx, SRR1_NOPTE);
   1012            break;
   1013        case MMU_DATA_LOAD:
   1014            ppc_hash64_set_dsi(cs, mmu_idx, eaddr, DSISR_NOPTE);
   1015            break;
   1016        case MMU_DATA_STORE:
   1017            ppc_hash64_set_dsi(cs, mmu_idx, eaddr, DSISR_NOPTE | DSISR_ISSTORE);
   1018            break;
   1019        default:
   1020            g_assert_not_reached();
   1021        }
   1022        return false;
   1023    }
   1024    qemu_log_mask(CPU_LOG_MMU,
   1025                  "found PTE at index %08" HWADDR_PRIx "\n", ptex);
   1026
   1027    /* 5. Check access permissions */
   1028
   1029    exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
   1030    pp_prot = ppc_hash64_pte_prot(mmu_idx, slb, pte);
   1031    amr_prot = ppc_hash64_amr_prot(cpu, pte);
   1032    prot = exec_prot & pp_prot & amr_prot;
   1033
   1034    need_prot = prot_for_access_type(access_type);
   1035    if (need_prot & ~prot) {
   1036        /* Access right violation */
   1037        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
   1038        if (!guest_visible) {
   1039            return false;
   1040        }
   1041        if (access_type == MMU_INST_FETCH) {
   1042            int srr1 = 0;
   1043            if (PAGE_EXEC & ~exec_prot) {
   1044                srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
   1045            } else if (PAGE_EXEC & ~pp_prot) {
   1046                srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
   1047            }
   1048            if (PAGE_EXEC & ~amr_prot) {
   1049                srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
   1050            }
   1051            ppc_hash64_set_isi(cs, mmu_idx, srr1);
   1052        } else {
   1053            int dsisr = 0;
   1054            if (need_prot & ~pp_prot) {
   1055                dsisr |= DSISR_PROTFAULT;
   1056            }
   1057            if (access_type == MMU_DATA_STORE) {
   1058                dsisr |= DSISR_ISSTORE;
   1059            }
   1060            if (need_prot & ~amr_prot) {
   1061                dsisr |= DSISR_AMR;
   1062            }
   1063            ppc_hash64_set_dsi(cs, mmu_idx, eaddr, dsisr);
   1064        }
   1065        return false;
   1066    }
   1067
    1068    qemu_log_mask(CPU_LOG_MMU, "PTE access granted!\n");
   1069
   1070    /* 6. Update PTE referenced and changed bits if necessary */
   1071
   1072    if (!(pte.pte1 & HPTE64_R_R)) {
   1073        ppc_hash64_set_r(cpu, ptex, pte.pte1);
   1074    }
   1075    if (!(pte.pte1 & HPTE64_R_C)) {
   1076        if (access_type == MMU_DATA_STORE) {
   1077            ppc_hash64_set_c(cpu, ptex, pte.pte1);
   1078        } else {
   1079            /*
   1080             * Treat the page as read-only for now, so that a later write
   1081             * will pass through this function again to set the C bit
   1082             */
   1083            prot &= ~PAGE_WRITE;
   1084        }
   1085    }
   1086
   1087    /* 7. Determine the real address from the PTE */
   1088
   1089    *raddrp = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
   1090    *protp = prot;
   1091    *psizep = apshift;
   1092    return true;
   1093}
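        /*
         * Step 7 above forms the real address with deposit64(): the RPN from
         * pte1 supplies the high-order bits and the low "apshift" bits of
         * the effective address are deposited on top as the byte offset
         * within the (possibly large) page.  apshift is also returned via
         * *psizep so the caller can install a TLB entry of the right size.
         */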
   1094
   1095void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
   1096                               target_ulong pte0, target_ulong pte1)
   1097{
   1098    /*
   1099     * XXX: given the fact that there are too many segments to
   1100     * invalidate, and we still don't have a tlb_flush_mask(env, n,
   1101     * mask) in QEMU, we just invalidate all TLBs
   1102     */
   1103    cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
   1104}
   1105
   1106#ifdef CONFIG_TCG
   1107void helper_store_lpcr(CPUPPCState *env, target_ulong val)
   1108{
   1109    PowerPCCPU *cpu = env_archcpu(env);
   1110
   1111    ppc_store_lpcr(cpu, val);
   1112}
   1113#endif
   1114
   1115void ppc_hash64_init(PowerPCCPU *cpu)
   1116{
   1117    CPUPPCState *env = &cpu->env;
   1118    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
   1119
   1120    if (!pcc->hash64_opts) {
   1121        assert(!mmu_is_64bit(env->mmu_model));
   1122        return;
   1123    }
   1124
   1125    cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
   1126}
   1127
   1128void ppc_hash64_finalize(PowerPCCPU *cpu)
   1129{
   1130    g_free(cpu->hash64_opts);
   1131}
   1132
   1133const PPCHash64Options ppc_hash64_opts_basic = {
   1134    .flags = 0,
   1135    .slb_size = 64,
   1136    .sps = {
   1137        { .page_shift = 12, /* 4K */
   1138          .slb_enc = 0,
   1139          .enc = { { .page_shift = 12, .pte_enc = 0 } }
   1140        },
   1141        { .page_shift = 24, /* 16M */
   1142          .slb_enc = 0x100,
   1143          .enc = { { .page_shift = 24, .pte_enc = 0 } }
   1144        },
   1145    },
   1146};
   1147
   1148const PPCHash64Options ppc_hash64_opts_POWER7 = {
   1149    .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
   1150    .slb_size = 32,
   1151    .sps = {
   1152        {
   1153            .page_shift = 12, /* 4K */
   1154            .slb_enc = 0,
   1155            .enc = { { .page_shift = 12, .pte_enc = 0 },
   1156                     { .page_shift = 16, .pte_enc = 0x7 },
   1157                     { .page_shift = 24, .pte_enc = 0x38 }, },
   1158        },
   1159        {
   1160            .page_shift = 16, /* 64K */
   1161            .slb_enc = SLB_VSID_64K,
   1162            .enc = { { .page_shift = 16, .pte_enc = 0x1 },
   1163                     { .page_shift = 24, .pte_enc = 0x8 }, },
   1164        },
   1165        {
   1166            .page_shift = 24, /* 16M */
   1167            .slb_enc = SLB_VSID_16M,
   1168            .enc = { { .page_shift = 24, .pte_enc = 0 }, },
   1169        },
   1170        {
   1171            .page_shift = 34, /* 16G */
   1172            .slb_enc = SLB_VSID_16G,
   1173            .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
   1174        },
   1175    }
   1176};
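        /*
         * Each PPCHash64SegmentPageSizes entry above pairs an SLB L||LP
         * encoding (slb_enc) with the page sizes that may actually appear
         * inside such a segment and their HPTE encodings (pte_enc).  The
         * POWER7 table lists several page sizes per segment because that CPU
         * supports MPSS (multiple page sizes per segment), e.g. 16 MiB pages
         * inside 4 KiB or 64 KiB segments.
         */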
   1177
   1178