cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu
Log | Files | Refs | Submodules | LICENSE | sfeed.txt

trans_rva.c.inc (6574B)


      1/*
      2 * RISC-V translation routines for the RV64A Standard Extension.
      3 *
      4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
      5 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
      6 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
      7 *
      8 * This program is free software; you can redistribute it and/or modify it
      9 * under the terms and conditions of the GNU General Public License,
     10 * version 2 or later, as published by the Free Software Foundation.
     11 *
     12 * This program is distributed in the hope it will be useful, but WITHOUT
     13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
     14 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
     15 * more details.
     16 *
     17 * You should have received a copy of the GNU General Public License along with
     18 * this program.  If not, see <http://www.gnu.org/licenses/>.
     19 */
     20
/*
 * Emit TCG ops for LR.W / LR.D (load-reserved).
 *
 * Loads from the address in rs1 into the global load_val, records the
 * address in load_res so that a subsequent SC can match it, and writes
 * the loaded value to rd.  The aq/rl instruction bits are implemented
 * as explicit memory barriers around the load: an STRL barrier before
 * it for rl, an LDAQ barrier after it for aq.
 *
 * Always returns true (the instruction was translated).
 */
static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_ZERO);

    if (a->rl) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }
    tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
    if (a->aq) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }

    /* Put addr in load_res, data in load_val.  */
    tcg_gen_mov_tl(load_res, src1);
    gen_set_gpr(ctx, a->rd, load_val);

    return true;
}
     39
/*
 * Emit TCG ops for SC.W / SC.D (store-conditional).
 *
 * The SC succeeds (stores rs2 and sets rd = 0) only if the address in
 * rs1 matches the reservation address in load_res AND memory still
 * holds the value captured in load_val; the latter is checked with an
 * atomic cmpxchg, which is how this implementation approximates the
 * hardware reservation.  On either failure rd is set to 1.  The
 * reservation is cleared on every path, since any SC invalidates it.
 *
 * Always returns true (the instruction was translated).
 */
static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
    TCGv dest, src1, src2;
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    /* Branch to the failure path if the address does not match.  */
    src1 = get_gpr(ctx, a->rs1, EXT_ZERO);
    tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);

    /*
     * Note that the TCG atomic primitives are SC,
     * so we can ignore AQ/RL along this path.
     */
    dest = dest_gpr(ctx, a->rd);
    src2 = get_gpr(ctx, a->rs2, EXT_NONE);
    tcg_gen_atomic_cmpxchg_tl(dest, load_res, load_val, src2,
                              ctx->mem_idx, mop);
    /* rd = 0 if memory still held load_val (store happened), else 1.  */
    tcg_gen_setcond_tl(TCG_COND_NE, dest, dest, load_val);
    gen_set_gpr(ctx, a->rd, dest);
    tcg_gen_br(l2);

    gen_set_label(l1);
    /*
     * Address comparison failure.  However, we still need to
     * provide the memory barrier implied by AQ/RL.
     */
    tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + a->rl * TCG_BAR_STRL);
    gen_set_gpr(ctx, a->rd, tcg_constant_tl(1));

    gen_set_label(l2);
    /*
     * Clear the load reservation, since an SC must fail if there is
     * an SC to any address, in between an LR and SC pair.
     */
    tcg_gen_movi_tl(load_res, -1);

    return true;
}
     78
     79static bool gen_amo(DisasContext *ctx, arg_atomic *a,
     80                    void(*func)(TCGv, TCGv, TCGv, TCGArg, MemOp),
     81                    MemOp mop)
     82{
     83    TCGv dest = dest_gpr(ctx, a->rd);
     84    TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
     85    TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
     86
     87    func(dest, src1, src2, ctx->mem_idx, mop);
     88
     89    gen_set_gpr(ctx, a->rd, dest);
     90    return true;
     91}
     92
     93static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a)
     94{
     95    REQUIRE_EXT(ctx, RVA);
     96    return gen_lr(ctx, a, (MO_ALIGN | MO_TESL));
     97}
     98
     99static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a)
    100{
    101    REQUIRE_EXT(ctx, RVA);
    102    return gen_sc(ctx, a, (MO_ALIGN | MO_TESL));
    103}
    104
    105static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a)
    106{
    107    REQUIRE_EXT(ctx, RVA);
    108    return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL));
    109}
    110
    111static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a)
    112{
    113    REQUIRE_EXT(ctx, RVA);
    114    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL));
    115}
    116
    117static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a)
    118{
    119    REQUIRE_EXT(ctx, RVA);
    120    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL));
    121}
    122
    123static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a)
    124{
    125    REQUIRE_EXT(ctx, RVA);
    126    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL));
    127}
    128
    129static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a)
    130{
    131    REQUIRE_EXT(ctx, RVA);
    132    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL));
    133}
    134
    135static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a)
    136{
    137    REQUIRE_EXT(ctx, RVA);
    138    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL));
    139}
    140
    141static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a)
    142{
    143    REQUIRE_EXT(ctx, RVA);
    144    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL));
    145}
    146
    147static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a)
    148{
    149    REQUIRE_EXT(ctx, RVA);
    150    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL));
    151}
    152
    153static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a)
    154{
    155    REQUIRE_EXT(ctx, RVA);
    156    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL));
    157}
    158
    159static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a)
    160{
    161    REQUIRE_64BIT(ctx);
    162    return gen_lr(ctx, a, MO_ALIGN | MO_TEQ);
    163}
    164
    165static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a)
    166{
    167    REQUIRE_64BIT(ctx);
    168    return gen_sc(ctx, a, (MO_ALIGN | MO_TEQ));
    169}
    170
    171static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a)
    172{
    173    REQUIRE_64BIT(ctx);
    174    return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEQ));
    175}
    176
    177static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a)
    178{
    179    REQUIRE_64BIT(ctx);
    180    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEQ));
    181}
    182
    183static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a)
    184{
    185    REQUIRE_64BIT(ctx);
    186    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEQ));
    187}
    188
    189static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a)
    190{
    191    REQUIRE_64BIT(ctx);
    192    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEQ));
    193}
    194
    195static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a)
    196{
    197    REQUIRE_64BIT(ctx);
    198    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEQ));
    199}
    200
    201static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a)
    202{
    203    REQUIRE_64BIT(ctx);
    204    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEQ));
    205}
    206
    207static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a)
    208{
    209    REQUIRE_64BIT(ctx);
    210    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEQ));
    211}
    212
    213static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a)
    214{
    215    REQUIRE_64BIT(ctx);
    216    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEQ));
    217}
    218
    219static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a)
    220{
    221    REQUIRE_64BIT(ctx);
    222    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEQ));
    223}