cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

pmp.c (18625B)


/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection implementation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
#include "exec/exec-all.h"

static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
    uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index);

/*
 * Accessor method to extract the address matching type ('A' field) from a
 * cfg register.
 */
static inline uint8_t pmp_get_a_field(uint8_t cfg)
{
    uint8_t a = cfg >> 3;
    return a & 0x3;
}
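
/*
 * Layout note (editor's addition): a pmpcfg byte is R (bit 0), W (bit 1),
 * X (bit 2), A (bits 3-4) and L (bit 7). E.g. cfg = 0x99 has L and R set,
 * and pmp_get_a_field() returns (0x99 >> 3) & 0x3 = 3 = PMP_AMATCH_NAPOT.
 */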

/*
 * Check whether a PMP entry is locked or not. (The TOR "next entry" lock
 * check is handled separately, in pmpaddr_csr_write.)
 */
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
        return 1;
    }

    return 0;
}

/*
 * Count the number of active rules.
 */
uint32_t pmp_get_num_rules(CPURISCVState *env)
{
    return env->pmp_state.num_rules;
}

/*
 * Accessor to get the cfg reg for a specific PMP/HART
 */
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        return env->pmp_state.pmp[pmp_index].cfg_reg;
    }

    return 0;
}


/*
 * Accessor to set the cfg reg for a specific PMP/HART.
 * Bounds checks and honours the relevant lock bit.
 */
static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        bool locked = true;

        if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
            /* mseccfg.RLB is set */
            if (MSECCFG_RLB_ISSET(env)) {
                locked = false;
            }

            /* mseccfg.MML is not set */
            if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
                locked = false;
            }

            /* mseccfg.MML is set */
            if (MSECCFG_MML_ISSET(env)) {
                /* not adding execute bit */
                if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
                    locked = false;
                }
                /* shared region and not adding X bit */
                if ((val & PMP_LOCK) != PMP_LOCK &&
                    (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
                    locked = false;
                }
            }
        } else {
            if (!pmp_is_locked(env, pmp_index)) {
                locked = false;
            }
        }

        if (locked) {
            qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
        } else {
            env->pmp_state.pmp[pmp_index].cfg_reg = val;
            pmp_update_rule(env, pmp_index);
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - out of bounds\n");
    }
}
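
/*
 * Illustrative note (editor's addition, not in upstream QEMU): with
 * mseccfg.MML set and RLB clear, the two checks above refuse exactly the
 * writes that would create an executable M-mode rule or a shared region.
 * For example, val = (PMP_LOCK | PMP_READ) is accepted (L set, X clear)
 * and val = PMP_READ is accepted (L clear, RWX != WX), but
 * val = (PMP_LOCK | PMP_EXEC) and val = (PMP_WRITE | PMP_EXEC) are both
 * refused.
 */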

static void pmp_decode_napot(target_ulong a, target_ulong *sa, target_ulong *ea)
{
    /*
       aaaa...aaa0   8-byte NAPOT range
       aaaa...aa01   16-byte NAPOT range
       aaaa...a011   32-byte NAPOT range
       ...
       aa01...1111   2^XLEN-byte NAPOT range
       a011...1111   2^(XLEN+1)-byte NAPOT range
       0111...1111   2^(XLEN+2)-byte NAPOT range
       1111...1111   Reserved
    */
    if (a == -1) {
        *sa = 0u;
        *ea = -1;
        return;
    } else {
        target_ulong t1 = ctz64(~a);
        target_ulong base = (a & ~(((target_ulong)1 << t1) - 1)) << 2;
        target_ulong range = ((target_ulong)1 << (t1 + 3)) - 1;
        *sa = base;
        *ea = base + range;
    }
}
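
/*
 * Worked example (editor's addition): pmpaddr = 0x9 = 0b1001 has one
 * trailing 1 bit, so t1 = 1, base = (0x8 << 2) = 0x20 and
 * range = (1 << 4) - 1 = 15, i.e. the 16-byte region [0x20, 0x2f].
 * Each extra trailing 1 bit doubles the region size.
 */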

void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
{
    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
    target_ulong prev_addr = 0u;
    target_ulong sa = 0u;
    target_ulong ea = 0u;

    if (pmp_index >= 1u) {
        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
    }

    switch (pmp_get_a_field(this_cfg)) {
    case PMP_AMATCH_OFF:
        sa = 0u;
        ea = -1;
        break;

    case PMP_AMATCH_TOR:
        sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (this_addr << 2) - 1u;
        break;

    case PMP_AMATCH_NA4:
        sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (sa + 4u) - 1u;
        break;

    case PMP_AMATCH_NAPOT:
        pmp_decode_napot(this_addr, &sa, &ea);
        break;

    default:
        sa = 0u;
        ea = 0u;
        break;
    }

    env->pmp_state.addr[pmp_index].sa = sa;
    env->pmp_state.addr[pmp_index].ea = ea;
}
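
/*
 * Worked example (editor's addition): with pmpaddr0 = 0x100 and
 * pmpaddr1 = 0x200, a TOR rule in entry 1 covers byte addresses
 * [0x100 << 2, (0x200 << 2) - 1] = [0x400, 0x7ff]. Entry 0's address is
 * only the floor; its own A field does not matter for entry 1.
 */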

void pmp_update_rule_nums(CPURISCVState *env)
{
    int i;

    env->pmp_state.num_rules = 0;
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        if (PMP_AMATCH_OFF != a_field) {
            env->pmp_state.num_rules++;
        }
    }
}

/*
 * Convert cfg/addr reg values here into simple 'sa' --> start address and
 * 'ea' --> end address values. This function is called relatively
 * infrequently, whereas the check that an address is within a pmp rule is
 * called often, so optimise that one.
 */
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index)
{
    pmp_update_rule_addr(env, pmp_index);
    pmp_update_rule_nums(env);
}

static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr)
{
    /* Both bounds are inclusive. */
    return (addr >= env->pmp_state.addr[pmp_index].sa) &&
           (addr <= env->pmp_state.addr[pmp_index].ea);
}

/*
 * Check if the address has required RWX privs when no PMP entry is matched.
 */
static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr,
    target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
    target_ulong mode)
{
    bool ret;

    if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
        if (MSECCFG_MMWP_ISSET(env)) {
            /*
             * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set,
             * so we default to deny all, even for M-mode.
             */
            *allowed_privs = 0;
            return false;
        } else if (MSECCFG_MML_ISSET(env)) {
            /*
             * The Machine Mode Lockdown (mseccfg.MML) bit is set, so
             * code can only be executed in M-mode with an applicable
             * rule. Other modes are denied entirely.
             */
            if (mode == PRV_M && !(privs & PMP_EXEC)) {
                ret = true;
                *allowed_privs = PMP_READ | PMP_WRITE;
            } else {
                ret = false;
                *allowed_privs = 0;
            }

            return ret;
        }
    }

    if ((!riscv_feature(env, RISCV_FEATURE_PMP)) || (mode == PRV_M)) {
        /*
         * Privileged spec v1.10 states that if HW doesn't implement any
         * PMP entry, or if no PMP entry matches an M-mode access, the
         * access succeeds.
         */
        ret = true;
        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
    } else {
        /*
         * Other modes are not allowed to succeed if they don't match a
         * rule, but there are rules. We've checked for no rule earlier
         * in this function.
         */
        ret = false;
        *allowed_privs = 0;
    }

    return ret;
}
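
/*
 * Summary of the default policy above (editor's addition): MMWP denies
 * every unmatched access, even from M-mode; MML allows unmatched M-mode
 * reads/writes but never execution; otherwise, if PMP is unimplemented
 * every access succeeds, while with rules present an unmatched M-mode
 * access succeeds and an unmatched lower-mode access fails.
 */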


/*
 * Public Interface
 */

/*
 * Check if the address has required RWX privs to complete desired operation
 */
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
    target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
    target_ulong mode)
{
    int i = 0;
    int ret = -1;
    int pmp_size = 0;
    target_ulong s = 0;
    target_ulong e = 0;

    /* Shortcut if no rules */
    if (0 == pmp_get_num_rules(env)) {
        return pmp_hart_has_privs_default(env, addr, size, privs,
                                          allowed_privs, mode);
    }

    if (size == 0) {
        if (riscv_feature(env, RISCV_FEATURE_MMU)) {
            /*
             * If size is unknown (0), assume that all bytes
             * from addr to the end of the page will be accessed.
             */
            pmp_size = -(addr | TARGET_PAGE_MASK);
        } else {
            pmp_size = sizeof(target_ulong);
        }
    } else {
        pmp_size = size;
    }

    /*
     * The 1.10 draft priv spec states there is an implicit order
     * from low to high.
     */
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        s = pmp_is_in_range(env, i, addr);
        e = pmp_is_in_range(env, i, addr + pmp_size - 1);

        /* partially inside */
        if ((s + e) == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "pmp violation - access is partially inside\n");
            ret = 0;
            break;
        }

        /* fully inside */
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);

        /*
         * Convert the PMP permissions to match the truth table in the
         * ePMP spec.
         */
        const uint8_t epmp_operation =
            ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
            ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
            (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
            ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
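
        /*
         * Editor's note: epmp_operation's index bits are, MSB to LSB,
         * {L, R, W, X}. E.g. cfg = PMP_LOCK | PMP_EXEC yields
         * 0b1001 = 9, the "locked, execute-only" row of the ePMP truth
         * table (M-mode gets PMP_EXEC in the switch below).
         */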

        if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
            /*
             * If the PMP entry is not off and the address is in range,
             * do the priv check
             */
            if (!MSECCFG_MML_ISSET(env)) {
                /*
                 * If the mseccfg.MML bit is not set, do the PMP priv
                 * check. This always applies to regular PMP.
                 */
                *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                if ((mode != PRV_M) || pmp_is_locked(env, i)) {
                    *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
                }
            } else {
                /*
                 * If the mseccfg.MML bit is set, do the enhanced PMP
                 * priv check.
                 */
                if (mode == PRV_M) {
                    switch (epmp_operation) {
                    case 0:
                    case 1:
                    case 4:
                    case 5:
                    case 6:
                    case 7:
                    case 8:
                        *allowed_privs = 0;
                        break;
                    case 2:
                    case 3:
                    case 14:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 9:
                    case 10:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 11:
                    case 13:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 12:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                } else {
                    switch (epmp_operation) {
                    case 0:
                    case 8:
                    case 9:
                    case 12:
                    case 13:
                    case 14:
                        *allowed_privs = 0;
                        break;
                    case 1:
                    case 10:
                    case 11:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 2:
                    case 4:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    case 3:
                    case 6:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 5:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 7:
                        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                }
            }

            ret = ((privs & *allowed_privs) == privs);
            break;
        }
    }

    /* No rule matched */
    if (ret == -1) {
        return pmp_hart_has_privs_default(env, addr, size, privs,
                                          allowed_privs, mode);
    }

    return ret == 1;
}
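
/*
 * Usage sketch (editor's addition, simplified from QEMU's TLB fill path;
 * the surrounding code is hypothetical):
 *
 *     pmp_priv_t allowed = 0;
 *     if (!pmp_hart_has_privs(env, paddr, size, PMP_READ, &allowed,
 *                             env->priv)) {
 *         // raise an access fault for the failing access here
 *     }
 *
 * On success, 'allowed' may be wider than the requested privs; it is what
 * pmp_priv_to_page_prot() (below) converts into TLB page flags.
 */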

/*
 * Handle a write to a pmpcfg CSR
 */
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
    target_ulong val)
{
    int i;
    uint8_t cfg_val;

    trace_pmpcfg_csr_write(env->mhartid, reg_index, val);

    if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - incorrect address\n");
        return;
    }

    for (i = 0; i < sizeof(target_ulong); i++) {
        cfg_val = (val >> 8 * i) & 0xff;
        pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
    }

    /* If the PMP permission of any addr has been changed, flush TLB pages. */
    tlb_flush(env_cpu(env));
}
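
/*
 * Layout note (editor's addition): each pmpcfg CSR packs one 8-bit cfg
 * per byte, so on RV64 pmpcfg0 holds entries 0-7 and odd-numbered pmpcfg
 * CSRs do not exist (hence the reg_index & 1 check above). E.g. on RV64,
 * writing 0x0000001f00000000 to pmpcfg0 sets entry 4 to L=0, NAPOT,
 * R/W/X.
 */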


/*
 * Handle a read from a pmpcfg CSR
 */
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
    int i;
    target_ulong cfg_val = 0;
    target_ulong val = 0;

    for (i = 0; i < sizeof(target_ulong); i++) {
        val = pmp_read_cfg(env, (reg_index * 4) + i);
        cfg_val |= (val << (i * 8));
    }
    trace_pmpcfg_csr_read(env->mhartid, reg_index, cfg_val);

    return cfg_val;
}


/*
 * Handle a write to a pmpaddr CSR
 */
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
    target_ulong val)
{
    trace_pmpaddr_csr_write(env->mhartid, addr_index, val);

    if (addr_index < MAX_RISCV_PMPS) {
        /*
         * In TOR mode, we need to check the lock bit of the next pmp
         * entry (if there is a next one).
         */
        if (addr_index + 1 < MAX_RISCV_PMPS) {
            uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;

            if (pmp_cfg & PMP_LOCK &&
                PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "ignoring pmpaddr write - pmpcfg + 1 locked\n");
                return;
            }
        }

        if (!pmp_is_locked(env, addr_index)) {
            env->pmp_state.pmp[addr_index].addr_reg = val;
            pmp_update_rule(env, addr_index);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpaddr write - locked\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr write - out of bounds\n");
    }
}
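
/*
 * Encoding note (editor's addition): a pmpaddr CSR stores the byte
 * address shifted right by 2 (4-byte granularity); the << 2 in
 * pmp_update_rule_addr()/pmp_decode_napot() undoes this. E.g. writing
 * 0x20000000 describes byte address 0x80000000.
 */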


/*
 * Handle a read from a pmpaddr CSR
 */
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
    target_ulong val = 0;

    if (addr_index < MAX_RISCV_PMPS) {
        val = env->pmp_state.pmp[addr_index].addr_reg;
        trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr read - out of bounds\n");
    }

    return val;
}

/*
 * Handle a write to the mseccfg CSR
 */
void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
{
    int i;

    trace_mseccfg_csr_write(env->mhartid, val);

    /*
     * RLB cannot be turned on if it is currently clear and any PMP
     * region is locked.
     */
    if (!MSECCFG_RLB_ISSET(env)) {
        for (i = 0; i < MAX_RISCV_PMPS; i++) {
            if (pmp_is_locked(env, i)) {
                val &= ~MSECCFG_RLB;
                break;
            }
        }
    }

    /* Sticky bits */
    val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));

    env->mseccfg = val;
}

/*
 * Handle a read from the mseccfg CSR
 */
target_ulong mseccfg_csr_read(CPURISCVState *env)
{
    trace_mseccfg_csr_read(env->mhartid, env->mseccfg);
    return env->mseccfg;
}

/*
 * Calculate the TLB size when the PMP entry overlaps the TLB page, i.e.
 * when its start or end address falls within the page.
 */
static target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
                                     target_ulong tlb_sa, target_ulong tlb_ea)
{
    target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
    target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;

    /* PMP range fully inside the page */
    if (pmp_sa >= tlb_sa && pmp_ea <= tlb_ea) {
        return pmp_ea - pmp_sa + 1;
    }

    /* PMP range starts inside the page and extends past its end */
    if (pmp_sa >= tlb_sa && pmp_sa <= tlb_ea && pmp_ea >= tlb_ea) {
        return tlb_ea - pmp_sa + 1;
    }

    /* PMP range starts before the page and ends inside it */
    if (pmp_ea <= tlb_ea && pmp_ea >= tlb_sa && pmp_sa <= tlb_sa) {
        return pmp_ea - tlb_sa + 1;
    }

    return 0;
}
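
/*
 * Worked example (editor's addition): with a 4 KiB page at
 * tlb_sa = 0x80000000 and a 1 KiB PMP region [0x80000400, 0x800007ff],
 * the first case applies and the caller shrinks the TLB entry to 0x400
 * bytes, so the PMP boundary is honoured inside the page.
 */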

/*
 * Check whether there is a PMP entry whose range covers this page. If
 * so, try to find the minimum granularity for the TLB size.
 */
bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
                         target_ulong *tlb_size)
{
    int i;
    target_ulong val;
    target_ulong tlb_ea = (tlb_sa + TARGET_PAGE_SIZE - 1);

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        val = pmp_get_tlb_size(env, i, tlb_sa, tlb_ea);
        if (val) {
            if (*tlb_size == 0 || *tlb_size > val) {
                *tlb_size = val;
            }
        }
    }

    return *tlb_size != 0;
}

/*
 * Convert PMP privilege to TLB page privilege.
 */
int pmp_priv_to_page_prot(pmp_priv_t pmp_priv)
{
    int prot = 0;

    if (pmp_priv & PMP_READ) {
        prot |= PAGE_READ;
    }
    if (pmp_priv & PMP_WRITE) {
        prot |= PAGE_WRITE;
    }
    if (pmp_priv & PMP_EXEC) {
        prot |= PAGE_EXEC;
    }

    return prot;
}