cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

tlb_helper.c (45487B)


/*
 * MIPS TLB (Translation lookaside buffer) helpers.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"

#include "cpu.h"
#include "internal.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "hw/mips/cpudevs.h"
#include "exec/helper-proto.h"

/* TLB management */
static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards.  */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}

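/*
 * Extract the physical frame number from an EntryLo register value.
 * The PFN field starts at bit 6; on 32-bit targets the additional
 * PFNX bits (used with extended physical addressing) live in the
 * upper half of the 64-bit register and are pasted above the PFN.
 */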
static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
{
#if defined(TARGET_MIPS64)
    return extract64(entrylo, 6, 54);
#else
    return extract64(entrylo, 6, 24) | /* PFN */
           (extract64(entrylo, 32, 32) << 24); /* PFNX */
#endif
}

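/*
 * Copy the CP0 EntryHi/EntryLo0/EntryLo1/PageMask state into TLB
 * entry idx. Writing EntryHi with the EHINV bit set marks the entry
 * as invalid instead of filling it.
 */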
static void r4k_fill_tlb(CPUMIPSState *env, int idx)
{
    r4k_tlb_t *tlb;
    uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
        tlb->EHINV = 1;
        return;
    }
    tlb->EHINV = 0;
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    tlb->MMID = env->CP0_MemoryMapID;
    tlb->PageMask = env->CP0_PageMask;
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
    tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
}

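/*
 * TLBINV: invalidate every non-global TLB entry whose ASID (or MMID,
 * when Config5.MI enables Memory Map IDs) matches the current one,
 * then flush QEMU's own TLB to drop any cached translations.
 */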
static void r4k_helper_tlbinv(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    r4k_tlb_t *tlb;
    int idx;

    MMID = mi ? MMID : (uint32_t) ASID;
    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        if (!tlb->G && tlb_mmid == MMID) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}

static void r4k_helper_tlbinvf(CPUMIPSState *env)
{
    int idx;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
    }
    cpu_mips_tlb_flush(env);
}

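/*
 * TLBWI: indexed TLB write. The guest-visible entry at CP0_Index is
 * replaced with the current CP0 state. QEMU additionally keeps shadow
 * copies of evicted entries (see r4k_invalidate_tlb); those must be
 * discarded here unless the write only upgrades access permissions,
 * otherwise stale translations could survive.
 */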
static void r4k_helper_tlbwi(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;
    r4k_tlb_t *tlb;
    int idx;

    MMID = mi ? MMID : (uint32_t) ASID;

    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    VPN &= env->SEGMask;
#endif
    EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
    G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    V0 = (env->CP0_EntryLo0 & 2) != 0;
    D0 = (env->CP0_EntryLo0 & 4) != 0;
    XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    V1 = (env->CP0_EntryLo1 & 2) != 0;
    D1 = (env->CP0_EntryLo1 & 4) != 0;
    XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;

    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /*
     * Discard cached TLB entries, unless tlbwi is just upgrading access
     * permissions on the current entry.
     */
    if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G ||
        (!tlb->EHINV && EHINV) ||
        (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
        (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
        (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
        (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
        r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
    }

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(env, idx);
}

static void r4k_helper_tlbwr(CPUMIPSState *env)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(env, r);
}

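/*
 * TLBP: probe the TLB for an entry matching CP0_EntryHi. On a hit,
 * CP0_Index is set to the matching index; on a miss, bit 31 (the P
 * bit) of CP0_Index is set instead.
 */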
static void r4k_helper_tlbp(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    int i;

    MMID = mi ? MMID : (uint32_t) ASID;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        /* Check ASID/MMID, virtual page number & size */
        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard shadow entries if any of them match.  */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
            tag &= env->SEGMask;
#endif
            tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
            /* Check ASID/MMID, virtual page number & size */
            if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) {
                r4k_mips_tlb_flush_extra(env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}

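/*
 * Inverse of get_tlb_pfn_from_entrylo(): put a stored PFN back into
 * EntryLo layout, splitting it into PFN and PFNX on 32-bit targets.
 */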
static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
{
#if defined(TARGET_MIPS64)
    return tlb_pfn << 6;
#else
    return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */
           (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
#endif
}

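/*
 * TLBR: read the TLB entry selected by CP0_Index back into the CP0
 * EntryHi/EntryLo/PageMask registers. An EHINV entry reads back as
 * EntryHi.EHINV with the other registers cleared.
 */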
static void r4k_helper_tlbr(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    r4k_tlb_t *tlb;
    int idx;

    MMID = mi ? MMID : (uint32_t) ASID;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /* If this will change the current ASID/MMID, flush qemu's TLB.  */
    if (MMID != tlb_mmid) {
        cpu_mips_tlb_flush(env);
    }

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    if (tlb->EHINV) {
        env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
        env->CP0_PageMask = 0;
        env->CP0_EntryLo0 = 0;
        env->CP0_EntryLo1 = 0;
    } else {
        env->CP0_EntryHi = mi ? tlb->VPN : tlb->VPN | tlb->ASID;
        env->CP0_MemoryMapID = tlb->MMID;
        env->CP0_PageMask = tlb->PageMask;
        env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
                        ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
                        get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
        env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
                        ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
                        get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
    }
}

void helper_tlbwi(CPUMIPSState *env)
{
    env->tlb->helper_tlbwi(env);
}

void helper_tlbwr(CPUMIPSState *env)
{
    env->tlb->helper_tlbwr(env);
}

void helper_tlbp(CPUMIPSState *env)
{
    env->tlb->helper_tlbp(env);
}

void helper_tlbr(CPUMIPSState *env)
{
    env->tlb->helper_tlbr(env);
}

void helper_tlbinv(CPUMIPSState *env)
{
    env->tlb->helper_tlbinv(env);
}

void helper_tlbinvf(CPUMIPSState *env)
{
    env->tlb->helper_tlbinvf(env);
}

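/*
 * Invalidate guest TLB entries in response to a GINVT message. An
 * entry is invalidated if it matches the requested virtual address
 * and/or MMID, or unconditionally (above the wired limit) for an
 * invalidate-all request.
 */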
static void global_invalidate_tlb(CPUMIPSState *env,
                           uint32_t invMsgVPN2,
                           uint8_t invMsgR,
                           uint32_t invMsgMMid,
                           bool invAll,
                           bool invVAMMid,
                           bool invMMid,
                           bool invVA)
{

    int idx;
    r4k_tlb_t *tlb;
    bool VAMatch;
    bool MMidMatch;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        VAMatch =
            (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask))
#ifdef TARGET_MIPS64
            &&
            (extract64(env->CP0_EntryHi, 62, 2) == invMsgR)
#endif
            );
        MMidMatch = tlb->MMID == invMsgMMid;
        if ((invAll && (idx > env->CP0_Wired)) ||
            (VAMatch && invVAMMid && (tlb->G || MMidMatch)) ||
            (VAMatch && invVA) ||
            (MMidMatch && !(tlb->G) && invMMid)) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}

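/*
 * GINVT: broadcast a TLB invalidate to every CPU. The type operand
 * selects the match rule: 0 = invalidate all, 1 = by virtual address,
 * 2 = by MMID, 3 = by virtual address and MMID.
 */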
void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type)
{
    bool invAll = type == 0;
    bool invVA = type == 1;
    bool invMMid = type == 2;
    bool invVAMMid = type == 3;
    uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1);
    uint8_t invMsgR = 0;
    uint32_t invMsgMMid = env->CP0_MemoryMapID;
    CPUState *other_cs = first_cpu;

#ifdef TARGET_MIPS64
    invMsgR = extract64(arg, 62, 2);
#endif

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid,
                              invAll, invVAMMid, invMMid, invVA);
    }
}

/* no MMU emulation */
static int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                              target_ulong address, MMUAccessType access_type)
{
    *physical = address;
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    return TLBRET_MATCH;
}

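/*
 * Fixed mapping (FMT): useg (0x00000000-0x7FFFFFFF) is offset by 1GB
 * unless Status.ERL is set, kseg0/kseg1 (0x80000000-0xBFFFFFFF) map
 * onto the low 512MB of physical memory, and everything above maps
 * identity. All pages are readable, writable and executable.
 */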
/* fixed mapping MMU emulation */
static int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type)
{
    if (address <= (int32_t)0x7FFFFFFFUL) {
        if (!(env->CP0_Status & (1 << CP0St_ERL))) {
            *physical = address + 0x40000000UL;
        } else {
            *physical = address;
        }
    } else if (address <= (int32_t)0xBFFFFFFFUL) {
        *physical = address & 0x1FFFFFFF;
    } else {
        *physical = address;
    }

    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    return TLBRET_MATCH;
}

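/*
 * Look the address up in the guest TLB, including the shadow entries
 * beyond nb_tlb. Each entry maps an even/odd pair of virtual pages;
 * the address bit just above the single-page offset selects which
 * half, and the V/D/XI/RI bits of that half determine the access
 * rights returned.
 */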
/* MIPS32/MIPS64 R4000-style MMU emulation */
static int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                           target_ulong address, MMUAccessType access_type)
{
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint32_t tlb_mmid;
    int i;

    MMID = mi ? MMID : (uint32_t) ASID;

    for (i = 0; i < env->tlb->tlb_in_use; i++) {
        r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        target_ulong tag = address & ~mask;
        target_ulong VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif

        /* Check ASID/MMID, virtual page number & size */
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            int n = !!(address & mask & ~(mask >> 1));
            /* Check access rights */
            if (!(n ? tlb->V1 : tlb->V0)) {
                return TLBRET_INVALID;
            }
            if (access_type == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
                return TLBRET_XI;
            }
            if (access_type == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
                return TLBRET_RI;
            }
            if (access_type != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
                *physical = tlb->PFN[n] | (address & (mask >> 1));
                *prot = PAGE_READ;
                if (n ? tlb->D1 : tlb->D0) {
                    *prot |= PAGE_WRITE;
                }
                if (!(n ? tlb->XI1 : tlb->XI0)) {
                    *prot |= PAGE_EXEC;
                }
                return TLBRET_MATCH;
            }
            return TLBRET_DIRTY;
        }
    }
    return TLBRET_NOMATCH;
}

static void no_mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb->nb_tlb = 1;
    env->tlb->map_address = &no_mmu_map_address;
}

static void fixed_mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb->nb_tlb = 1;
    env->tlb->map_address = &fixed_mmu_map_address;
}

static void r4k_mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb->nb_tlb = 1 + ((def->CP0_Config1 >> CP0C1_MMU) & 63);
    env->tlb->map_address = &r4k_map_address;
    env->tlb->helper_tlbwi = r4k_helper_tlbwi;
    env->tlb->helper_tlbwr = r4k_helper_tlbwr;
    env->tlb->helper_tlbp = r4k_helper_tlbp;
    env->tlb->helper_tlbr = r4k_helper_tlbr;
    env->tlb->helper_tlbinv = r4k_helper_tlbinv;
    env->tlb->helper_tlbinvf = r4k_helper_tlbinvf;
}

void mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext));

    switch (def->mmu_type) {
    case MMU_TYPE_NONE:
        no_mmu_init(env, def);
        break;
    case MMU_TYPE_R4000:
        r4k_mmu_init(env, def);
        break;
    case MMU_TYPE_FMT:
        fixed_mmu_init(env, def);
        break;
    case MMU_TYPE_R3000:
    case MMU_TYPE_R6000:
    case MMU_TYPE_R8000:
    default:
        cpu_abort(env_cpu(env), "MMU type not supported\n");
    }
}

void cpu_mips_tlb_flush(CPUMIPSState *env)
{
    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush(env_cpu(env));
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

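/*
 * Convert a translation failure into the architectural exception
 * state: pick the exception code from the TLBRET_* value, then update
 * BadVAddr, Context.BadVPN2 and EntryHi.VPN2 (and XContext on 64-bit
 * targets) so the guest's refill handler sees the faulting address.
 */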
static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);
    int exception = 0, error_code = 0;

    if (access_type == MMU_INST_FETCH) {
        error_code |= EXCP_INST_NOTAVAIL;
    }

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        /* Reference to kernel address from user mode or supervisor mode */
        /* Reference to supervisor address from user mode */
        if (access_type == MMU_DATA_STORE) {
            exception = EXCP_AdES;
        } else {
            exception = EXCP_AdEL;
        }
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (access_type == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        error_code |= EXCP_TLB_NOMATCH;
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (access_type == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        exception = EXCP_LTLBL;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBXI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBRI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    }
    /* Raise exception */
    if (!(env->hflags & MIPS_HFLAG_DM)) {
        env->CP0_BadVAddr = address;
    }
    env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
                       ((address >> 9) & 0x007ffff0);
    env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
                       (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
                       (address & (TARGET_PAGE_MASK << 1));
#if defined(TARGET_MIPS64)
    env->CP0_EntryHi &= env->SEGMask;
    env->CP0_XContext =
        (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) | /* PTEBase */
        (extract64(address, 62, 2) << (env->SEGBITS - 9)) |     /* R       */
        (extract64(address, 13, env->SEGBITS - 13) << 4);       /* BadVPN2 */
#endif
    cs->exception_index = exception;
    env->error_code = error_code;
}

#if !defined(TARGET_MIPS64)

/*
 * Perform hardware page table walk
 *
 * Memory accesses are performed using the KERNEL privilege level.
 * Synchronous exceptions detected on memory accesses cause a silent exit
 * from page table walking, resulting in a TLB or XTLB Refill exception.
 *
 * Implementations are not required to support page table walk memory
 * accesses from mapped memory regions. When an unsupported access is
 * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill
 * exception.
 *
 * Note that if an exception is caused by the AddressTranslation or
 * LoadMemory functions, that exception is not taken; instead a silent
 * exit is taken, resulting in a TLB or XTLB Refill exception.
 */

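/*
 * Load one page table entry of the given size (in bits) from vaddr.
 * Fails if the address is not naturally aligned for the entry size.
 */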
static bool get_pte(CPUMIPSState *env, uint64_t vaddr, int entry_size,
        uint64_t *pte)
{
    if ((vaddr & ((entry_size >> 3) - 1)) != 0) {
        return false;
    }
    if (entry_size == 64) {
        *pte = cpu_ldq_code(env, vaddr);
    } else {
        *pte = cpu_ldl_code(env, vaddr);
    }
    return true;
}

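/*
 * Rearrange an in-memory PTE into EntryLo layout: the two RI/XI bits
 * stored just below the PTEI position are extracted, the remaining
 * bits are shifted down into place, and RI/XI are reinserted at their
 * architectural EntryLo positions.
 */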
static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry,
        int entry_size, int ptei)
{
    uint64_t result = entry;
    uint64_t rixi;
    if (ptei > entry_size) {
        ptei -= 32;
    }
    result >>= (ptei - 2);
    rixi = result & 3;
    result >>= 2;
    result |= rixi << CP0EnLo_XI;
    return result;
}

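/*
 * Walk one directory level of the page table. Returns 0 on failure,
 * 1 if a huge-page (leaf) entry was found at this level and
 * pw_entrylo0/1 were filled in, or 2 if the entry is a pointer to the
 * next level (in which case *vaddr is updated to it).
 */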
static int walk_directory(CPUMIPSState *env, uint64_t *vaddr,
        int directory_index, bool *huge_page, bool *hgpg_directory_hit,
        uint64_t *pw_entrylo0, uint64_t *pw_entrylo1)
{
    int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1;
    int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F;
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
    int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;
    int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;
    int directory_shift = (ptew > 1) ? -1 :
            (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
    int leaf_shift = (ptew > 1) ? -1 :
            (ptew == 1) ? native_shift + 1 : native_shift;
    uint32_t direntry_size = 1 << (directory_shift + 3);
    uint32_t leafentry_size = 1 << (leaf_shift + 3);
    uint64_t entry;
    uint64_t paddr;
    int prot;
    uint64_t lsb = 0;
    uint64_t w = 0;

    if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD,
                             cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        /* wrong base address */
        return 0;
    }
    if (!get_pte(env, *vaddr, direntry_size, &entry)) {
        return 0;
    }

    if ((entry & (1 << psn)) && hugepg) {
        *huge_page = true;
        *hgpg_directory_hit = true;
        entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
        w = directory_index - 1;
        if (directory_index & 0x1) {
            /* Generate adjacent page from same PTE for odd TLB page */
            lsb = BIT_ULL(w) >> 6;
            *pw_entrylo0 = entry & ~lsb; /* even page */
            *pw_entrylo1 = entry | lsb; /* odd page */
        } else if (dph) {
            int oddpagebit = 1 << leaf_shift;
            uint64_t vaddr2 = *vaddr ^ oddpagebit;
            if (*vaddr & oddpagebit) {
                *pw_entrylo1 = entry;
            } else {
                *pw_entrylo0 = entry;
            }
            if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD,
                                     cpu_mmu_index(env, false)) !=
                                     TLBRET_MATCH) {
                return 0;
            }
            if (!get_pte(env, vaddr2, leafentry_size, &entry)) {
                return 0;
            }
            entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
            if (*vaddr & oddpagebit) {
                *pw_entrylo0 = entry;
            } else {
                *pw_entrylo1 = entry;
            }
        } else {
            return 0;
        }
        return 1;
    } else {
        *vaddr = entry;
        return 2;
    }
}

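/*
 * Hardware page table walker (HTW). On a TLB refill miss, walk the
 * guest page table rooted at CP0_PWBase through up to three directory
 * levels plus a leaf level holding an even/odd PTE pair, then insert
 * the pair into the TLB exactly as a TLBWR would. Returns false if
 * the walker is disabled, misconfigured, or the walk fails, in which
 * case the ordinary refill exception is raised instead.
 */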
static bool page_table_walk_refill(CPUMIPSState *env, vaddr address,
                                   int mmu_idx)
{
    int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F;
    int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F;
    int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F;
    int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F;
    int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;

    /* Initial values */
    bool huge_page = false;
    bool hgpg_bdhit = false;
    bool hgpg_gdhit = false;
    bool hgpg_udhit = false;
    bool hgpg_mdhit = false;

    int32_t pw_pagemask = 0;
    target_ulong pw_entryhi = 0;
    uint64_t pw_entrylo0 = 0;
    uint64_t pw_entrylo1 = 0;

    /* Native pointer size */
    /* For 32-bit architectures, this bit is fixed to 0. */
    int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;

    /* Indices from PWField */
    int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F;
    int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F;
    int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F;
    int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;

    /* Indices computed from faulting address */
    int gindex = (address >> pf_gdw) & ((1 << gdw) - 1);
    int uindex = (address >> pf_udw) & ((1 << udw) - 1);
    int mindex = (address >> pf_mdw) & ((1 << mdw) - 1);
    int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1);

    /* Other HTW configs */
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;

    /* HTW Shift values (depend on entry size) */
    int directory_shift = (ptew > 1) ? -1 :
            (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
    int leaf_shift = (ptew > 1) ? -1 :
            (ptew == 1) ? native_shift + 1 : native_shift;

    /* Offsets into tables */
    int goffset = gindex << directory_shift;
    int uoffset = uindex << directory_shift;
    int moffset = mindex << directory_shift;
    int ptoffset0 = (ptindex >> 1) << (leaf_shift + 1);
    int ptoffset1 = ptoffset0 | (1 << (leaf_shift));

    uint32_t leafentry_size = 1 << (leaf_shift + 3);

    /* Starting address - Page Table Base */
    uint64_t vaddr = env->CP0_PWBase;

    uint64_t dir_entry;
    uint64_t paddr;
    int prot;
    int m;

    if (!(env->CP0_Config3 & (1 << CP0C3_PW))) {
        /* walker is unimplemented */
        return false;
    }
    if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) {
        /* walker is disabled */
        return false;
    }
    if (!(gdw > 0 || udw > 0 || mdw > 0)) {
        /* no structure to walk */
        return false;
    }
    if ((directory_shift == -1) || (leaf_shift == -1)) {
        return false;
    }

    /* Global Directory */
    if (gdw > 0) {
        vaddr |= goffset;
        switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Upper directory */
    if (udw > 0) {
        vaddr |= uoffset;
        switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Middle directory */
    if (mdw > 0) {
        vaddr |= moffset;
        switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Leaf Level Page Table - First half of PTE pair */
    vaddr |= ptoffset0;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
    pw_entrylo0 = dir_entry;

    /* Leaf Level Page Table - Second half of PTE pair */
    vaddr |= ptoffset1;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
    pw_entrylo1 = dir_entry;

refill:

    m = (1 << pf_ptw) - 1;

    if (huge_page) {
        switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 |
                hgpg_mdhit)
        {
        case 4:
            m = (1 << pf_gdw) - 1;
            if (pf_gdw & 1) {
                m >>= 1;
            }
            break;
        case 2:
            m = (1 << pf_udw) - 1;
            if (pf_udw & 1) {
                m >>= 1;
            }
            break;
        case 1:
            m = (1 << pf_mdw) - 1;
            if (pf_mdw & 1) {
                m >>= 1;
            }
            break;
        }
    }
    pw_pagemask = m >> TARGET_PAGE_BITS_MIN;
    update_pagemask(env, pw_pagemask << CP0PM_MASK, &pw_pagemask);
    pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF);
    {
        target_ulong tmp_entryhi = env->CP0_EntryHi;
        int32_t tmp_pagemask = env->CP0_PageMask;
        uint64_t tmp_entrylo0 = env->CP0_EntryLo0;
        uint64_t tmp_entrylo1 = env->CP0_EntryLo1;

        env->CP0_EntryHi = pw_entryhi;
        env->CP0_PageMask = pw_pagemask;
        env->CP0_EntryLo0 = pw_entrylo0;
        env->CP0_EntryLo1 = pw_entrylo1;

        /*
         * The hardware page walker inserts a page into the TLB in a manner
         * identical to a TLBWR instruction as executed by the software refill
         * handler.
         */
        r4k_helper_tlbwr(env);

        env->CP0_EntryHi = tmp_entryhi;
        env->CP0_PageMask = tmp_pagemask;
        env->CP0_EntryLo0 = tmp_entrylo0;
        env->CP0_EntryLo1 = tmp_entrylo1;
    }
    return true;
}
#endif

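/*
 * TCG TLB fill hook: translate the access, map the page into QEMU's
 * TLB on success, otherwise try the hardware page table walker (on
 * 32-bit targets) before raising the architectural MMU exception.
 */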
bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    hwaddr physical;
    int prot;
    int ret = TLBRET_BADADDR;

    /* data access */
    /* XXX: put correct access by using cpu_restore_state() correctly */
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mmu_idx);
    switch (ret) {
    case TLBRET_MATCH:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        break;
    default:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
        break;
    }
    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
#if !defined(TARGET_MIPS64)
    if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
        /*
         * Memory reads during hardware page table walking are performed
         * as if they were kernel-mode load instructions.
         */
        int mode = (env->hflags & MIPS_HFLAG_KSU);
        bool ret_walker;
        env->hflags &= ~MIPS_HFLAG_KSU;
        ret_walker = page_table_walk_refill(env, address, mmu_idx);
        env->hflags |= mode;
        if (ret_walker) {
            ret = get_physical_address(env, &physical, &prot, address,
                                       access_type, mmu_idx);
            if (ret == TLBRET_MATCH) {
                tlb_set_page(cs, address & TARGET_PAGE_MASK,
                             physical & TARGET_PAGE_MASK, prot,
                             mmu_idx, TARGET_PAGE_SIZE);
                return true;
            }
        }
    }
#endif
    if (probe) {
        return false;
    }

    raise_mmu_exception(env, address, access_type, ret);
    do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
}

hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
                                  MMUAccessType access_type, uintptr_t retaddr)
{
    hwaddr physical;
    int prot;
    int ret = 0;
    CPUState *cs = env_cpu(env);

    /* data access */
    ret = get_physical_address(env, &physical, &prot, address, access_type,
                               cpu_mmu_index(env, false));
    if (ret == TLBRET_MATCH) {
        return physical;
    }

    raise_mmu_exception(env, address, access_type, ret);
    cpu_loop_exit_restore(cs, retaddr);
}

static void set_hflags_for_handler(CPUMIPSState *env)
{
    /* Exception handlers are entered in 32-bit mode.  */
    env->hflags &= ~(MIPS_HFLAG_M16);
    /* ...except that microMIPS lets you choose.  */
    if (env->insn_flags & ASE_MICROMIPS) {
        env->hflags |= (!!(env->CP0_Config3 &
                           (1 << CP0C3_ISA_ON_EXC))
                        << MIPS_HFLAG_M16_SHIFT);
    }
}

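/*
 * Latch the faulting instruction word(s) into CP0 BadInstr (plus
 * BadInstrX where nanoMIPS requires it, and BadInstrP for the prior
 * branch when faulting in a delay slot), when the Config3.BI/BP
 * capability bits advertise support.
 */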
static inline void set_badinstr_registers(CPUMIPSState *env)
{
    if (env->insn_flags & ISA_NANOMIPS32) {
        if (env->CP0_Config3 & (1 << CP0C3_BI)) {
            uint32_t instr = (cpu_lduw_code(env, env->active_tc.PC)) << 16;
            if ((instr & 0x10000000) == 0) {
                instr |= cpu_lduw_code(env, env->active_tc.PC + 2);
            }
            env->CP0_BadInstr = instr;

            if ((instr & 0xFC000000) == 0x60000000) {
                instr = cpu_lduw_code(env, env->active_tc.PC + 4) << 16;
                env->CP0_BadInstrX = instr;
            }
        }
        return;
    }

    if (env->hflags & MIPS_HFLAG_M16) {
        /* TODO: add BadInstr support for microMIPS */
        return;
    }
    if (env->CP0_Config3 & (1 << CP0C3_BI)) {
        env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
    }
    if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
        (env->hflags & MIPS_HFLAG_BMASK)) {
        env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
    }
}

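/*
 * Deliver a pending exception or interrupt. The handler address is
 * the exception base plus an offset: 0x000 for TLB refill, 0x080 for
 * the 64-bit XTLB refill, 0x100 for cache error (forced into KSEG1),
 * 0x180 for the general vector and 0x200 and up for (possibly
 * vectored) interrupts. Debug and reset-class exceptions use their
 * own entry points and EPC registers.
 */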
void mips_cpu_do_interrupt(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    bool update_badinstr = 0;
    target_ulong offset;
    int cause = -1;

    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
                 " %s exception\n",
                 __func__, env->active_tc.PC, env->CP0_EPC,
                 mips_exception_name(cs->exception_index));
    }
    if (cs->exception_index == EXCP_EXT_INTERRUPT &&
        (env->hflags & MIPS_HFLAG_DM)) {
        cs->exception_index = EXCP_DINT;
    }
    offset = 0x180;
    switch (cs->exception_index) {
    case EXCP_DSS:
        env->CP0_Debug |= 1 << CP0DB_DSS;
        /*
         * Debug single step cannot be raised inside a delay slot and
         * resume will always occur on the next instruction
         * (but we assume the pc has always been updated during
         * code translation).
         */
        env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
        goto enter_debug_mode;
    case EXCP_DINT:
        env->CP0_Debug |= 1 << CP0DB_DINT;
        goto set_DEPC;
    case EXCP_DIB:
        env->CP0_Debug |= 1 << CP0DB_DIB;
        goto set_DEPC;
    case EXCP_DBp:
        env->CP0_Debug |= 1 << CP0DB_DBp;
        /* Setup DExcCode - SDBBP instruction */
        env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) |
                         (9 << CP0DB_DEC);
        goto set_DEPC;
    case EXCP_DDBS:
        env->CP0_Debug |= 1 << CP0DB_DDBS;
        goto set_DEPC;
    case EXCP_DDBL:
        env->CP0_Debug |= 1 << CP0DB_DDBL;
    set_DEPC:
        env->CP0_DEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
 enter_debug_mode:
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS_R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        /* EJTAG probe trap enable is not implemented... */
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        }
        env->active_tc.PC = env->exception_base + 0x480;
        set_hflags_for_handler(env);
        break;
    case EXCP_RESET:
        cpu_reset(CPU(cpu));
        break;
    case EXCP_SRESET:
        env->CP0_Status |= (1 << CP0St_SR);
        memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
        goto set_error_EPC;
    case EXCP_NMI:
        env->CP0_Status |= (1 << CP0St_NMI);
 set_error_EPC:
        env->CP0_ErrorEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
        env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS_R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        }
        env->active_tc.PC = env->exception_base;
        set_hflags_for_handler(env);
        break;
    case EXCP_EXT_INTERRUPT:
        cause = 0;
        if (env->CP0_Cause & (1 << CP0Ca_IV)) {
            uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;

            if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
                offset = 0x200;
            } else {
                uint32_t vector = 0;
                uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;

                if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
                    /*
                     * For VEIC mode, the external interrupt controller feeds
                     * the vector through the CP0Cause IP lines.
                     */
                    vector = pending;
                } else {
                    /*
                     * Vectored Interrupts
                     * Mask with Status.IM7-IM0 to get enabled interrupts.
                     */
                    pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
                    /* Find the highest-priority interrupt. */
                    while (pending >>= 1) {
                        vector++;
                    }
                }
                offset = 0x200 + (vector * (spacing << 5));
            }
        }
        goto set_EPC;
    case EXCP_LTLBL:
        cause = 1;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_TLBL:
        cause = 2;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_TLBS:
        cause = 3;
        update_badinstr = 1;
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_AdEL:
        cause = 4;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_AdES:
        cause = 5;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_IBE:
        cause = 6;
        goto set_EPC;
    case EXCP_DBE:
        cause = 7;
        goto set_EPC;
    case EXCP_SYSCALL:
        cause = 8;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_BREAK:
        cause = 9;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_RI:
        cause = 10;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_CpU:
        cause = 11;
        update_badinstr = 1;
        env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
                         (env->error_code << CP0Ca_CE);
        goto set_EPC;
    case EXCP_OVERFLOW:
        cause = 12;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TRAP:
        cause = 13;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MSAFPE:
        cause = 14;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_FPE:
        cause = 15;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_C2E:
        cause = 18;
        goto set_EPC;
    case EXCP_TLBRI:
        cause = 19;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TLBXI:
        cause = 20;
        goto set_EPC;
    case EXCP_MSADIS:
        cause = 21;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MDMX:
        cause = 22;
        goto set_EPC;
    case EXCP_DWATCH:
        cause = 23;
        /* XXX: TODO: manage deferred watch exceptions */
        goto set_EPC;
    case EXCP_MCHECK:
        cause = 24;
        goto set_EPC;
    case EXCP_THREAD:
        cause = 25;
        goto set_EPC;
    case EXCP_DSPDIS:
        cause = 26;
        goto set_EPC;
    case EXCP_CACHE:
        cause = 30;
        offset = 0x100;
 set_EPC:
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_EPC = exception_resume_pc(env);
            if (update_badinstr) {
                set_badinstr_registers(env);
            }
            if (env->hflags & MIPS_HFLAG_BMASK) {
                env->CP0_Cause |= (1U << CP0Ca_BD);
            } else {
                env->CP0_Cause &= ~(1U << CP0Ca_BD);
            }
            env->CP0_Status |= (1 << CP0St_EXL);
            if (env->insn_flags & ISA_MIPS3) {
                env->hflags |= MIPS_HFLAG_64;
                if (!(env->insn_flags & ISA_MIPS_R6) ||
                    env->CP0_Status & (1 << CP0St_KX)) {
                    env->hflags &= ~MIPS_HFLAG_AWRAP;
                }
            }
            env->hflags |= MIPS_HFLAG_CP0;
            env->hflags &= ~(MIPS_HFLAG_KSU);
        }
        env->hflags &= ~MIPS_HFLAG_BMASK;
        if (env->CP0_Status & (1 << CP0St_BEV)) {
            env->active_tc.PC = env->exception_base + 0x200;
        } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
                                    env->CP0_Config5 & (1 << CP0C5_CV))) {
            /* Force KSeg1 for cache errors */
            env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000);
        } else {
            env->active_tc.PC = env->CP0_EBase & ~0xfff;
        }

        env->active_tc.PC += offset;
        set_hflags_for_handler(env);
        env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) |
                         (cause << CP0Ca_EC);
        break;
    default:
        abort();
    }
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
                 "    S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, cause,
                 env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
                 env->CP0_DEPC);
    }
    cs->exception_index = EXCP_NONE;
}

bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        MIPSCPU *cpu = MIPS_CPU(cs);
        CPUMIPSState *env = &cpu->env;

        if (cpu_mips_hw_interrupts_enabled(env) &&
            cpu_mips_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCP_EXT_INTERRUPT;
            env->error_code = 0;
            mips_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

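/*
 * Invalidate guest TLB entry idx in QEMU's TLB. For a random write
 * (tlbwr) the evicted entry is preserved as a hidden "shadow" entry
 * past nb_tlb, since the guest cannot observe which entry the random
 * replacement chose; otherwise every page the entry maps is flushed
 * from QEMU's TLB, page by page.
 */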
void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra)
{
    CPUState *cs = env_cpu(env);
    r4k_tlb_t *tlb;
    target_ulong addr;
    target_ulong end;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint32_t tlb_mmid;
    target_ulong mask;

    MMID = mi ? MMID : (uint32_t) ASID;

    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /*
     * The qemu TLB is flushed when the ASID/MMID changes, so no need to
     * flush these entries again.
     */
    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    if (tlb->G == 0 && tlb_mmid != MMID) {
        return;
    }

    if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
        /*
         * For tlbwr, we can shadow the discarded entry into
         * a new (fake) TLB entry, as long as the guest can not
         * tell that it's there.
         */
        env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
        env->tlb->tlb_in_use++;
        return;
    }

    /* 1k pages are not supported. */
    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
    if (tlb->V0) {
        addr = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | (mask >> 1);
        while (addr < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
    if (tlb->V1) {
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | mask;
        while (addr - 1 < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
}