cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

tcg-target.c.inc (57743B)


      1/*
      2 * Tiny Code Generator for QEMU
      3 *
      4 * Copyright (c) 2008 Fabrice Bellard
      5 *
      6 * Permission is hereby granted, free of charge, to any person obtaining a copy
      7 * of this software and associated documentation files (the "Software"), to deal
      8 * in the Software without restriction, including without limitation the rights
      9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     10 * copies of the Software, and to permit persons to whom the Software is
     11 * furnished to do so, subject to the following conditions:
     12 *
     13 * The above copyright notice and this permission notice shall be included in
     14 * all copies or substantial portions of the Software.
     15 *
     16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
     19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
     21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
     22 * THE SOFTWARE.
     23 */
     24
     25#include "../tcg-pool.c.inc"
     26
     27#ifdef CONFIG_DEBUG_TCG
     28static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
     29    "%g0",
     30    "%g1",
     31    "%g2",
     32    "%g3",
     33    "%g4",
     34    "%g5",
     35    "%g6",
     36    "%g7",
     37    "%o0",
     38    "%o1",
     39    "%o2",
     40    "%o3",
     41    "%o4",
     42    "%o5",
     43    "%o6",
     44    "%o7",
     45    "%l0",
     46    "%l1",
     47    "%l2",
     48    "%l3",
     49    "%l4",
     50    "%l5",
     51    "%l6",
     52    "%l7",
     53    "%i0",
     54    "%i1",
     55    "%i2",
     56    "%i3",
     57    "%i4",
     58    "%i5",
     59    "%i6",
     60    "%i7",
     61};
     62#endif
     63
     64#ifdef __arch64__
     65# define SPARC64 1
     66#else
     67# define SPARC64 0
     68#endif
     69
     70#define TCG_CT_CONST_S11  0x100
     71#define TCG_CT_CONST_S13  0x200
     72#define TCG_CT_CONST_ZERO 0x400
     73
     74/*
     75 * For softmmu, we need to avoid conflicts with the first 3
      76 * argument registers, which are used to perform the tlb lookup
      77 * and to call the helper function.
     78 */
     79#ifdef CONFIG_SOFTMMU
     80#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
     81#else
     82#define SOFTMMU_RESERVE_REGS 0
     83#endif
     84
     85/*
     86 * Note that sparcv8plus can only hold 64 bit quantities in %g and %o
     87 * registers.  These are saved manually by the kernel in full 64-bit
     88 * slots.  The %i and %l registers are saved by the register window
     89 * mechanism, which only allocates space for 32 bits.  Given that this
     90 * window spill/fill can happen on any signal, we must consider the
     91 * high bits of the %i and %l registers garbage at all times.
     92 */
     93#define ALL_GENERAL_REGS     MAKE_64BIT_MASK(0, 32)
     94#if SPARC64
     95# define ALL_GENERAL_REGS64  ALL_GENERAL_REGS
     96#else
     97# define ALL_GENERAL_REGS64  MAKE_64BIT_MASK(0, 16)
     98#endif
     99#define ALL_QLDST_REGS       (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
    100#define ALL_QLDST_REGS64     (ALL_GENERAL_REGS64 & ~SOFTMMU_RESERVE_REGS)
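        /*
         * On a 32-bit (v8plus) host, ALL_GENERAL_REGS64 therefore covers only
         * the first 16 registers, %g0-%g7 and %o0-%o7, the only ones whose
         * upper 32 bits survive a register window spill/fill.
         */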
    101
    102/* Define some temporary registers.  T2 is used for constant generation.  */
    103#define TCG_REG_T1  TCG_REG_G1
    104#define TCG_REG_T2  TCG_REG_O7
    105
    106#ifndef CONFIG_SOFTMMU
    107# define TCG_GUEST_BASE_REG TCG_REG_I5
    108#endif
    109
    110#define TCG_REG_TB  TCG_REG_I1
    111#define USE_REG_TB  (sizeof(void *) > 4)
    112
    113static const int tcg_target_reg_alloc_order[] = {
    114    TCG_REG_L0,
    115    TCG_REG_L1,
    116    TCG_REG_L2,
    117    TCG_REG_L3,
    118    TCG_REG_L4,
    119    TCG_REG_L5,
    120    TCG_REG_L6,
    121    TCG_REG_L7,
    122
    123    TCG_REG_I0,
    124    TCG_REG_I1,
    125    TCG_REG_I2,
    126    TCG_REG_I3,
    127    TCG_REG_I4,
    128    TCG_REG_I5,
    129
    130    TCG_REG_G2,
    131    TCG_REG_G3,
    132    TCG_REG_G4,
    133    TCG_REG_G5,
    134
    135    TCG_REG_O0,
    136    TCG_REG_O1,
    137    TCG_REG_O2,
    138    TCG_REG_O3,
    139    TCG_REG_O4,
    140    TCG_REG_O5,
    141};
    142
    143static const int tcg_target_call_iarg_regs[6] = {
    144    TCG_REG_O0,
    145    TCG_REG_O1,
    146    TCG_REG_O2,
    147    TCG_REG_O3,
    148    TCG_REG_O4,
    149    TCG_REG_O5,
    150};
    151
    152static const int tcg_target_call_oarg_regs[] = {
    153    TCG_REG_O0,
    154    TCG_REG_O1,
    155    TCG_REG_O2,
    156    TCG_REG_O3,
    157};
    158
    159#define INSN_OP(x)  ((x) << 30)
    160#define INSN_OP2(x) ((x) << 22)
    161#define INSN_OP3(x) ((x) << 19)
    162#define INSN_OPF(x) ((x) << 5)
    163#define INSN_RD(x)  ((x) << 25)
    164#define INSN_RS1(x) ((x) << 14)
    165#define INSN_RS2(x) (x)
    166#define INSN_ASI(x) ((x) << 5)
    167
    168#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
    169#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
    170#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
    171#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
    172#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
    173#define INSN_COND(x) ((x) << 25)
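        /*
         * For example, "add %o1, %o2, %o0" is assembled below as
         *   ARITH_ADD | INSN_RD(TCG_REG_O0) | INSN_RS1(TCG_REG_O1) | INSN_RS2(TCG_REG_O2),
         * i.e. op in bits 31:30, rd in 29:25, op3 in 24:19, rs1 in 18:14 and
         * rs2 (or a 13-bit immediate) in the low bits of the 32-bit word.
         */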
    174
    175#define COND_N     0x0
    176#define COND_E     0x1
    177#define COND_LE    0x2
    178#define COND_L     0x3
    179#define COND_LEU   0x4
    180#define COND_CS    0x5
    181#define COND_NEG   0x6
    182#define COND_VS    0x7
    183#define COND_A     0x8
    184#define COND_NE    0x9
    185#define COND_G     0xa
    186#define COND_GE    0xb
    187#define COND_GU    0xc
    188#define COND_CC    0xd
    189#define COND_POS   0xe
    190#define COND_VC    0xf
    191#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))
    192
    193#define RCOND_Z    1
    194#define RCOND_LEZ  2
    195#define RCOND_LZ   3
    196#define RCOND_NZ   5
    197#define RCOND_GZ   6
    198#define RCOND_GEZ  7
    199
    200#define MOVCC_ICC  (1 << 18)
    201#define MOVCC_XCC  (1 << 18 | 1 << 12)
    202
    203#define BPCC_ICC   0
    204#define BPCC_XCC   (2 << 20)
    205#define BPCC_PT    (1 << 19)
    206#define BPCC_PN    0
    207#define BPCC_A     (1 << 29)
    208
    209#define BPR_PT     BPCC_PT
    210
    211#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
    212#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
    213#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
    214#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
    215#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
    216#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
    217#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
    218#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
    219#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
    220#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
    221#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
    222#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
    223#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
    224#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
    225#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
    226#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
    227#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
    228#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
    229#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
    230#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
    231#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))
    232
    233#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
    234#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))
    235
    236#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
    237#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
    238#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))
    239
    240#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
    241#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
    242#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))
    243
    244#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
    245#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
    246#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
    247#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
    248#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
    249#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
    250#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
    251#define CALL       INSN_OP(1)
    252#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
    253#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
    254#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
    255#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
    256#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
    257#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
    258#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
    259#define STB        (INSN_OP(3) | INSN_OP3(0x05))
    260#define STH        (INSN_OP(3) | INSN_OP3(0x06))
    261#define STW        (INSN_OP(3) | INSN_OP3(0x04))
    262#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
    263#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
    264#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
    265#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
    266#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
    267#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
    268#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
    269#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
    270#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
    271#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
    272#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
    273#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))
    274
    275#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))
    276
    277#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)
    278
    279#ifndef ASI_PRIMARY_LITTLE
    280#define ASI_PRIMARY_LITTLE 0x88
    281#endif
    282
    283#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
    284#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
    285#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
    286#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
    287#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
    288
    289#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
    290#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
    291#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
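        /*
         * The *_LE forms are simply alternate-space accesses through the
         * ASI_PRIMARY_LITTLE address space, so a byte-swapped guest access is
         * a single instruction rather than a load/store plus an explicit swap.
         */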
    292
    293#ifndef use_vis3_instructions
    294bool use_vis3_instructions;
    295#endif
    296
    297static bool check_fit_i64(int64_t val, unsigned int bits)
    298{
    299    return val == sextract64(val, 0, bits);
    300}
    301
    302static bool check_fit_i32(int32_t val, unsigned int bits)
    303{
    304    return val == sextract32(val, 0, bits);
    305}
    306
    307#define check_fit_tl    check_fit_i64
    308#if SPARC64
    309# define check_fit_ptr  check_fit_i64
    310#else
    311# define check_fit_ptr  check_fit_i32
    312#endif
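        /* E.g. check_fit_ptr(off, 13) accepts the signed range [-4096, 4095],
           exactly the reach of a format-3 imm13 field.  */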
    313
    314static bool patch_reloc(tcg_insn_unit *src_rw, int type,
    315                        intptr_t value, intptr_t addend)
    316{
    317    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    318    uint32_t insn = *src_rw;
    319    intptr_t pcrel;
    320
    321    value += addend;
    322    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);
    323
    324    switch (type) {
    325    case R_SPARC_WDISP16:
    326        assert(check_fit_ptr(pcrel >> 2, 16));
    327        insn &= ~INSN_OFF16(-1);
    328        insn |= INSN_OFF16(pcrel);
    329        break;
    330    case R_SPARC_WDISP19:
    331        assert(check_fit_ptr(pcrel >> 2, 19));
    332        insn &= ~INSN_OFF19(-1);
    333        insn |= INSN_OFF19(pcrel);
    334        break;
    335    default:
    336        g_assert_not_reached();
    337    }
    338
    339    *src_rw = insn;
    340    return true;
    341}
    342
    343/* test if a constant matches the constraint */
    344static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
    345{
    346    if (ct & TCG_CT_CONST) {
    347        return 1;
    348    }
    349
    350    if (type == TCG_TYPE_I32) {
    351        val = (int32_t)val;
    352    }
    353
    354    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
    355        return 1;
    356    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
    357        return 1;
    358    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
    359        return 1;
    360    } else {
    361        return 0;
    362    }
    363}
    364
    365static void tcg_out_nop(TCGContext *s)
    366{
    367    tcg_out32(s, NOP);
    368}
    369
    370static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
    371                          TCGReg rs2, int op)
    372{
    373    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
    374}
    375
    376static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
    377                           int32_t offset, int op)
    378{
    379    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
    380}
    381
    382static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
     383                           int32_t val2, int val2const, int op)
    384{
    385    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
    386              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
    387}
    388
    389static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
    390{
    391    if (ret != arg) {
    392        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    393    }
    394    return true;
    395}
    396
    397static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
    398{
    399    if (ret != arg) {
    400        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    401    } else {
    402        tcg_out_nop(s);
    403    }
    404}
    405
    406static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
    407{
    408    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
    409}
    410
    411static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
    412{
    413    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
    414}
    415
    416static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
    417                             tcg_target_long arg, bool in_prologue)
    418{
    419    tcg_target_long hi, lo = (int32_t)arg;
    420    tcg_target_long test, lsb;
    421
    422    /* Make sure we test 32-bit constants for imm13 properly.  */
    423    if (type == TCG_TYPE_I32) {
    424        arg = lo;
    425    }
    426
    427    /* A 13-bit constant sign-extended to 64-bits.  */
    428    if (check_fit_tl(arg, 13)) {
    429        tcg_out_movi_imm13(s, ret, arg);
    430        return;
    431    }
    432
    433    /* A 13-bit constant relative to the TB.  */
    434    if (!in_prologue && USE_REG_TB) {
    435        test = tcg_tbrel_diff(s, (void *)arg);
    436        if (check_fit_ptr(test, 13)) {
    437            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
    438            return;
    439        }
    440    }
    441
    442    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
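            /* E.g. 0x12345678 becomes sethi 0x12345400 (the high 22 bits)
               followed by an "or" of the low 10 bits, 0x278.  */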
    443    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
    444        tcg_out_sethi(s, ret, arg);
    445        if (arg & 0x3ff) {
    446            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
    447        }
    448        return;
    449    }
    450
    451    /* A 32-bit constant sign-extended to 64-bits.  */
    452    if (arg == lo) {
    453        tcg_out_sethi(s, ret, ~arg);
    454        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
    455        return;
    456    }
    457
    458    /* A 21-bit constant, shifted.  */
    459    lsb = ctz64(arg);
    460    test = (tcg_target_long)arg >> lsb;
    461    if (check_fit_tl(test, 13)) {
    462        tcg_out_movi_imm13(s, ret, test);
    463        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
    464        return;
    465    } else if (lsb > 10 && test == extract64(test, 0, 21)) {
    466        tcg_out_sethi(s, ret, test << 10);
    467        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
    468        return;
    469    }
    470
    471    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    472    if (check_fit_i32(lo, 13)) {
    473        hi = (arg - lo) >> 32;
    474        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
    475        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
    476        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    477    } else {
    478        hi = arg >> 32;
    479        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
    480        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
    481        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
    482        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
    483    }
    484}
    485
    486static void tcg_out_movi(TCGContext *s, TCGType type,
    487                         TCGReg ret, tcg_target_long arg)
    488{
    489    tcg_out_movi_int(s, type, ret, arg, false);
    490}
    491
    492static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
    493                            TCGReg a2, int op)
    494{
    495    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
    496}
    497
    498static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
    499                         intptr_t offset, int op)
    500{
    501    if (check_fit_ptr(offset, 13)) {
    502        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
    503                  INSN_IMM13(offset));
    504    } else {
    505        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
    506        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    507    }
    508}
    509
    510static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
    511                       TCGReg arg1, intptr_t arg2)
    512{
    513    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
    514}
    515
    516static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
    517                       TCGReg arg1, intptr_t arg2)
    518{
    519    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
    520}
    521
    522static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
    523                        TCGReg base, intptr_t ofs)
    524{
    525    if (val == 0) {
    526        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
    527        return true;
    528    }
    529    return false;
    530}
    531
    532static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, const void *arg)
    533{
    534    intptr_t diff = tcg_tbrel_diff(s, arg);
    535    if (USE_REG_TB && check_fit_ptr(diff, 13)) {
    536        tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
    537        return;
    538    }
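            /*
             * Otherwise split the address at a 1KiB boundary: the movi of the
             * ~0x3ff part never needs a final "or" of low bits (sethi clears
             * them), and the low 10 bits go in the load's imm13 offset instead.
             */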
    539    tcg_out_movi(s, TCG_TYPE_PTR, ret, (uintptr_t)arg & ~0x3ff);
    540    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, (uintptr_t)arg & 0x3ff);
    541}
    542
    543static void tcg_out_sety(TCGContext *s, TCGReg rs)
    544{
    545    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
    546}
    547
    548static void tcg_out_rdy(TCGContext *s, TCGReg rd)
    549{
    550    tcg_out32(s, RDY | INSN_RD(rd));
    551}
    552
    553static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
    554                          int32_t val2, int val2const, int uns)
    555{
    556    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    557    if (uns) {
    558        tcg_out_sety(s, TCG_REG_G0);
    559    } else {
    560        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
    561        tcg_out_sety(s, TCG_REG_T1);
    562    }
    563
    564    tcg_out_arithc(s, rd, rs1, val2, val2const,
    565                   uns ? ARITH_UDIV : ARITH_SDIV);
    566}
    567
    568static const uint8_t tcg_cond_to_bcond[] = {
    569    [TCG_COND_EQ] = COND_E,
    570    [TCG_COND_NE] = COND_NE,
    571    [TCG_COND_LT] = COND_L,
    572    [TCG_COND_GE] = COND_GE,
    573    [TCG_COND_LE] = COND_LE,
    574    [TCG_COND_GT] = COND_G,
    575    [TCG_COND_LTU] = COND_CS,
    576    [TCG_COND_GEU] = COND_CC,
    577    [TCG_COND_LEU] = COND_LEU,
    578    [TCG_COND_GTU] = COND_GU,
    579};
    580
    581static const uint8_t tcg_cond_to_rcond[] = {
    582    [TCG_COND_EQ] = RCOND_Z,
    583    [TCG_COND_NE] = RCOND_NZ,
    584    [TCG_COND_LT] = RCOND_LZ,
    585    [TCG_COND_GT] = RCOND_GZ,
    586    [TCG_COND_LE] = RCOND_LEZ,
    587    [TCG_COND_GE] = RCOND_GEZ
    588};
    589
    590static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
    591{
    592    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
    593}
    594
    595static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
    596{
    597    int off19 = 0;
    598
    599    if (l->has_value) {
    600        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    601    } else {
    602        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    603    }
    604    tcg_out_bpcc0(s, scond, flags, off19);
    605}
    606
    607static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
    608{
    609    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
    610}
    611
    612static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
    613                               int32_t arg2, int const_arg2, TCGLabel *l)
    614{
    615    tcg_out_cmp(s, arg1, arg2, const_arg2);
    616    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    617    tcg_out_nop(s);
    618}
    619
    620static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
    621                          int32_t v1, int v1const)
    622{
    623    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
    624              | INSN_RS1(tcg_cond_to_bcond[cond])
    625              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
    626}
    627
    628static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
    629                                TCGReg c1, int32_t c2, int c2const,
    630                                int32_t v1, int v1const)
    631{
    632    tcg_out_cmp(s, c1, c2, c2const);
    633    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
    634}
    635
    636static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
    637                               int32_t arg2, int const_arg2, TCGLabel *l)
    638{
    639    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    640    if (arg2 == 0 && !is_unsigned_cond(cond)) {
    641        int off16 = 0;
    642
    643        if (l->has_value) {
    644            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
    645        } else {
    646            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
    647        }
    648        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
    649                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    650    } else {
    651        tcg_out_cmp(s, arg1, arg2, const_arg2);
    652        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    653    }
    654    tcg_out_nop(s);
    655}
    656
    657static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
    658                         int32_t v1, int v1const)
    659{
    660    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
    661              | (tcg_cond_to_rcond[cond] << 10)
    662              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
    663}
    664
    665static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
    666                                TCGReg c1, int32_t c2, int c2const,
    667                                int32_t v1, int v1const)
    668{
    669    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
    670       Note that the immediate range is one bit smaller, so we must check
    671       for that as well.  */
    672    if (c2 == 0 && !is_unsigned_cond(cond)
    673        && (!v1const || check_fit_i32(v1, 10))) {
    674        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    675    } else {
    676        tcg_out_cmp(s, c1, c2, c2const);
    677        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    678    }
    679}
    680
    681static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
    682                                TCGReg c1, int32_t c2, int c2const)
    683{
    684    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
    685    switch (cond) {
    686    case TCG_COND_LTU:
    687    case TCG_COND_GEU:
    688        /* The result of the comparison is in the carry bit.  */
    689        break;
    690
    691    case TCG_COND_EQ:
    692    case TCG_COND_NE:
    693        /* For equality, we can transform to inequality vs zero.  */
    694        if (c2 != 0) {
    695            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
    696            c2 = TCG_REG_T1;
    697        } else {
    698            c2 = c1;
    699        }
    700        c1 = TCG_REG_G0, c2const = 0;
    701        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
     702        break;
    703
    704    case TCG_COND_GTU:
    705    case TCG_COND_LEU:
    706        /* If we don't need to load a constant into a register, we can
    707           swap the operands on GTU/LEU.  There's no benefit to loading
    708           the constant into a temporary register.  */
    709        if (!c2const || c2 == 0) {
    710            TCGReg t = c1;
    711            c1 = c2;
    712            c2 = t;
    713            c2const = 0;
    714            cond = tcg_swap_cond(cond);
    715            break;
    716        }
    717        /* FALLTHRU */
    718
    719    default:
    720        tcg_out_cmp(s, c1, c2, c2const);
    721        tcg_out_movi_imm13(s, ret, 0);
    722        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
    723        return;
    724    }
    725
    726    tcg_out_cmp(s, c1, c2, c2const);
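            /*
             * The SUBCC in tcg_out_cmp leaves the carry flag set exactly when
             * the comparison is "less than, unsigned".  ADDC %g0 + 0 + C then
             * yields C for LTU; SUBC %g0 - (-1) - C yields 1 - C, i.e. !C, for GEU.
             */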
    727    if (cond == TCG_COND_LTU) {
    728        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    729    } else {
    730        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    731    }
    732}
    733
    734static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
    735                                TCGReg c1, int32_t c2, int c2const)
    736{
    737    if (use_vis3_instructions) {
    738        switch (cond) {
    739        case TCG_COND_NE:
    740            if (c2 != 0) {
    741                break;
    742            }
    743            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
    744            /* FALLTHRU */
    745        case TCG_COND_LTU:
    746            tcg_out_cmp(s, c1, c2, c2const);
    747            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
    748            return;
    749        default:
    750            break;
    751        }
    752    }
    753
    754    /* For 64-bit signed comparisons vs zero, we can avoid the compare
    755       if the input does not overlap the output.  */
    756    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
    757        tcg_out_movi_imm13(s, ret, 0);
    758        tcg_out_movr(s, cond, ret, c1, 1, 1);
    759    } else {
    760        tcg_out_cmp(s, c1, c2, c2const);
    761        tcg_out_movi_imm13(s, ret, 0);
    762        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    763    }
    764}
    765
    766static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
    767                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
    768                                int32_t bh, int bhconst, int opl, int oph)
    769{
    770    TCGReg tmp = TCG_REG_T1;
    771
    772    /* Note that the low parts are fully consumed before tmp is set.  */
    773    if (rl != ah && (bhconst || rl != bh)) {
    774        tmp = rl;
    775    }
    776
    777    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    778    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    779    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
    780}
    781
    782static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
    783                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
    784                                int32_t bh, int bhconst, bool is_sub)
    785{
    786    TCGReg tmp = TCG_REG_T1;
    787
    788    /* Note that the low parts are fully consumed before tmp is set.  */
    789    if (rl != ah && (bhconst || rl != bh)) {
    790        tmp = rl;
    791    }
    792
    793    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);
    794
    795    if (use_vis3_instructions && !is_sub) {
    796        /* Note that ADDXC doesn't accept immediates.  */
    797        if (bhconst && bh != 0) {
    798           tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh);
    799           bh = TCG_REG_T2;
    800        }
    801        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    802    } else if (bh == TCG_REG_G0) {
     803        /* If we have a zero, we can perform the operation in two insns,
     804           with the arithmetic first, and a conditional move into place.  */
     805        if (rh == ah) {
     806            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
     807                           is_sub ? ARITH_SUB : ARITH_ADD);
     808            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
     809        } else {
     810            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
     811            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
     812        }
    813    } else {
    814        /* Otherwise adjust BH as if there is carry into T2 ... */
    815        if (bhconst) {
    816            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1));
    817        } else {
    818            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
    819                           is_sub ? ARITH_SUB : ARITH_ADD);
    820        }
    821        /* ... smoosh T2 back to original BH if carry is clear ... */
    822        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
     823        /* ... and finally perform the arithmetic with the new operand.  */
    824        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    825    }
    826
    827    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
    828}
    829
    830static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
    831                                 bool in_prologue)
    832{
    833    ptrdiff_t disp = tcg_pcrel_diff(s, dest);
    834
    835    if (disp == (int32_t)disp) {
    836        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    837    } else {
    838        uintptr_t desti = (uintptr_t)dest;
    839        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
    840                         desti & ~0xfff, in_prologue);
    841        tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
    842    }
    843}
    844
    845static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest)
    846{
    847    tcg_out_call_nodelay(s, dest, false);
    848    tcg_out_nop(s);
    849}
    850
    851static void tcg_out_mb(TCGContext *s, TCGArg a0)
    852{
    853    /* Note that the TCG memory order constants mirror the Sparc MEMBAR.  */
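            /* I.e. MEMBAR's mmask bits #LoadLoad, #StoreLoad, #LoadStore and
               #StoreStore (bits 0..3) match TCG_MO_LD_LD, TCG_MO_ST_LD,
               TCG_MO_LD_ST and TCG_MO_ST_ST, so the mask passes through unchanged. */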
    854    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
    855}
    856
    857#ifdef CONFIG_SOFTMMU
    858static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
    859static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];
    860
    861static void emit_extend(TCGContext *s, TCGReg r, int op)
    862{
    863    /* Emit zero extend of 8, 16 or 32 bit data as
    864     * required by the MO_* value op; do nothing for 64 bit.
    865     */
    866    switch (op & MO_SIZE) {
    867    case MO_8:
    868        tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
    869        break;
    870    case MO_16:
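                /* 0xffff does not fit in a signed 13-bit immediate, so
                   zero-extend with a shift pair rather than an AND.  */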
    871        tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
    872        tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
    873        break;
    874    case MO_32:
    875        if (SPARC64) {
    876            tcg_out_arith(s, r, r, 0, SHIFT_SRL);
    877        }
    878        break;
    879    case MO_64:
    880        break;
    881    }
    882}
    883
    884static void build_trampolines(TCGContext *s)
    885{
    886    static void * const qemu_ld_helpers[] = {
    887        [MO_UB]   = helper_ret_ldub_mmu,
    888        [MO_SB]   = helper_ret_ldsb_mmu,
    889        [MO_LEUW] = helper_le_lduw_mmu,
    890        [MO_LESW] = helper_le_ldsw_mmu,
    891        [MO_LEUL] = helper_le_ldul_mmu,
    892        [MO_LEQ]  = helper_le_ldq_mmu,
    893        [MO_BEUW] = helper_be_lduw_mmu,
    894        [MO_BESW] = helper_be_ldsw_mmu,
    895        [MO_BEUL] = helper_be_ldul_mmu,
    896        [MO_BEQ]  = helper_be_ldq_mmu,
    897    };
    898    static void * const qemu_st_helpers[] = {
    899        [MO_UB]   = helper_ret_stb_mmu,
    900        [MO_LEUW] = helper_le_stw_mmu,
    901        [MO_LEUL] = helper_le_stl_mmu,
    902        [MO_LEQ]  = helper_le_stq_mmu,
    903        [MO_BEUW] = helper_be_stw_mmu,
    904        [MO_BEUL] = helper_be_stl_mmu,
    905        [MO_BEQ]  = helper_be_stq_mmu,
    906    };
    907
    908    int i;
    909    TCGReg ra;
    910
    911    for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
    912        if (qemu_ld_helpers[i] == NULL) {
    913            continue;
    914        }
    915
    916        /* May as well align the trampoline.  */
    917        while ((uintptr_t)s->code_ptr & 15) {
    918            tcg_out_nop(s);
    919        }
    920        qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
    921
    922        if (SPARC64 || TARGET_LONG_BITS == 32) {
    923            ra = TCG_REG_O3;
    924        } else {
    925            /* Install the high part of the address.  */
    926            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
    927            ra = TCG_REG_O4;
    928        }
    929
    930        /* Set the retaddr operand.  */
    931        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
    932        /* Set the env operand.  */
    933        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
    934        /* Tail call.  */
    935        tcg_out_call_nodelay(s, qemu_ld_helpers[i], true);
    936        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    937    }
    938
    939    for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
    940        if (qemu_st_helpers[i] == NULL) {
    941            continue;
    942        }
    943
    944        /* May as well align the trampoline.  */
    945        while ((uintptr_t)s->code_ptr & 15) {
    946            tcg_out_nop(s);
    947        }
    948        qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
    949
    950        if (SPARC64) {
    951            emit_extend(s, TCG_REG_O2, i);
    952            ra = TCG_REG_O4;
    953        } else {
    954            ra = TCG_REG_O1;
    955            if (TARGET_LONG_BITS == 64) {
    956                /* Install the high part of the address.  */
    957                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
    958                ra += 2;
    959            } else {
    960                ra += 1;
    961            }
    962            if ((i & MO_SIZE) == MO_64) {
    963                /* Install the high part of the data.  */
    964                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
    965                ra += 2;
    966            } else {
    967                emit_extend(s, ra, i);
    968                ra += 1;
    969            }
    970            /* Skip the oi argument.  */
    971            ra += 1;
    972        }
     973
    974        /* Set the retaddr operand.  */
    975        if (ra >= TCG_REG_O6) {
    976            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
    977                       TCG_TARGET_CALL_STACK_OFFSET);
    978            ra = TCG_REG_G1;
    979        }
    980        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
    981        /* Set the env operand.  */
    982        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
    983        /* Tail call.  */
    984        tcg_out_call_nodelay(s, qemu_st_helpers[i], true);
    985        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    986    }
    987}
    988#endif
    989
    990/* Generate global QEMU prologue and epilogue code */
    991static void tcg_target_qemu_prologue(TCGContext *s)
    992{
    993    int tmp_buf_size, frame_size;
    994
    995    /*
    996     * The TCG temp buffer is at the top of the frame, immediately
    997     * below the frame pointer.  Use the logical (aligned) offset here;
    998     * the stack bias is applied in temp_allocate_frame().
    999     */
   1000    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
   1001    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);
   1002
   1003    /*
   1004     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
   1005     * otherwise the minimal frame usable by callees.
   1006     */
   1007    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
   1008    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
   1009    frame_size += TCG_TARGET_STACK_ALIGN - 1;
   1010    frame_size &= -TCG_TARGET_STACK_ALIGN;
   1011    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
   1012              INSN_IMM13(-frame_size));
   1013
   1014#ifndef CONFIG_SOFTMMU
   1015    if (guest_base != 0) {
   1016        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
   1017        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
   1018    }
   1019#endif
   1020
   1021    /* We choose TCG_REG_TB such that no move is required.  */
   1022    if (USE_REG_TB) {
   1023        QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
   1024        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
   1025    }
   1026
   1027    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
   1028    /* delay slot */
   1029    tcg_out_nop(s);
   1030
   1031    /* Epilogue for goto_ptr.  */
   1032    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
   1033    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
   1034    /* delay slot */
   1035    tcg_out_movi_imm13(s, TCG_REG_O0, 0);
   1036
   1037#ifdef CONFIG_SOFTMMU
   1038    build_trampolines(s);
   1039#endif
   1040}
   1041
   1042static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
   1043{
   1044    int i;
   1045    for (i = 0; i < count; ++i) {
   1046        p[i] = NOP;
   1047    }
   1048}
   1049
   1050#if defined(CONFIG_SOFTMMU)
   1051
   1052/* We expect to use a 13-bit negative offset from ENV.  */
   1053QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
   1054QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));
   1055
   1056/* Perform the TLB load and compare.
   1057
   1058   Inputs:
   1059   ADDRLO and ADDRHI contain the possible two parts of the address.
   1060
   1061   MEM_INDEX and S_BITS are the memory context and log2 size of the load.
   1062
   1063   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   1064   This should be offsetof addr_read or addr_write.
   1065
   1066   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   1067   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */
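        /*
         * A rough sketch of the sequence emitted below, with r0/r1/r2 being
         * %o0/%o1/%o2 (loads shown as ldx for a 64-bit host):
         *
         *    ldx  [env + mask_off], r0       ! tlb_mask[mmu_idx]
         *    ldx  [env + table_off], r1      ! tlb_table[mmu_idx]
         *    srl  addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, r2
         *    and  r2, r0, r2
         *    add  r2, r1, r2                 ! r2 = &CPUTLBEntry
         *    ldx  [r2 + which], r0           ! tlb comparator
         *    ldx  [r2 + addend], r1          ! tlb addend
         *    and  addr, compare_mask, r2
         *    cmp  r0, r2
         */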
   1068
   1069static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
   1070                               MemOp opc, int which)
   1071{
   1072    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
   1073    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
   1074    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
   1075    const TCGReg r0 = TCG_REG_O0;
   1076    const TCGReg r1 = TCG_REG_O1;
   1077    const TCGReg r2 = TCG_REG_O2;
   1078    unsigned s_bits = opc & MO_SIZE;
   1079    unsigned a_bits = get_alignment_bits(opc);
   1080    tcg_target_long compare_mask;
   1081
   1082    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
   1083    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
   1084    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);
   1085
   1086    /* Extract the page index, shifted into place for tlb index.  */
   1087    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
   1088                   SHIFT_SRL);
   1089    tcg_out_arith(s, r2, r2, r0, ARITH_AND);
   1090
   1091    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2.  */
   1092    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);
   1093
   1094    /* Load the tlb comparator and the addend.  */
   1095    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
   1096    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));
   1097
   1098    /* Mask out the page offset, except for the required alignment.
   1099       We don't support unaligned accesses.  */
   1100    if (a_bits < s_bits) {
   1101        a_bits = s_bits;
   1102    }
   1103    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
   1104    if (check_fit_tl(compare_mask, 13)) {
   1105        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
   1106    } else {
   1107        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
   1108        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
   1109    }
   1110    tcg_out_cmp(s, r0, r2, 0);
   1111
   1112    /* If the guest address must be zero-extended, do so now.  */
   1113    if (SPARC64 && TARGET_LONG_BITS == 32) {
   1114        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
   1115        return r0;
   1116    }
   1117    return addr;
   1118}
   1119#endif /* CONFIG_SOFTMMU */
   1120
   1121static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
   1122    [MO_UB]   = LDUB,
   1123    [MO_SB]   = LDSB,
   1124
   1125    [MO_BEUW] = LDUH,
   1126    [MO_BESW] = LDSH,
   1127    [MO_BEUL] = LDUW,
   1128    [MO_BESL] = LDSW,
   1129    [MO_BEQ]  = LDX,
   1130
   1131    [MO_LEUW] = LDUH_LE,
   1132    [MO_LESW] = LDSH_LE,
   1133    [MO_LEUL] = LDUW_LE,
   1134    [MO_LESL] = LDSW_LE,
   1135    [MO_LEQ]  = LDX_LE,
   1136};
   1137
   1138static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
   1139    [MO_UB]   = STB,
   1140
   1141    [MO_BEUW] = STH,
   1142    [MO_BEUL] = STW,
   1143    [MO_BEQ]  = STX,
   1144
   1145    [MO_LEUW] = STH_LE,
   1146    [MO_LEUL] = STW_LE,
   1147    [MO_LEQ]  = STX_LE,
   1148};
   1149
   1150static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
   1151                            MemOpIdx oi, bool is_64)
   1152{
   1153    MemOp memop = get_memop(oi);
   1154#ifdef CONFIG_SOFTMMU
   1155    unsigned memi = get_mmuidx(oi);
   1156    TCGReg addrz, param;
   1157    const tcg_insn_unit *func;
   1158    tcg_insn_unit *label_ptr;
   1159
   1160    addrz = tcg_out_tlb_load(s, addr, memi, memop,
   1161                             offsetof(CPUTLBEntry, addr_read));
   1162
   1163    /* The fast path is exactly one insn.  Thus we can perform the
   1164       entire TLB Hit in the (annulled) delay slot of the branch
   1165       over the TLB Miss case.  */
   1166
   1167    /* beq,a,pt %[xi]cc, label0 */
   1168    label_ptr = s->code_ptr;
   1169    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
   1170                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
   1171    /* delay slot */
   1172    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
   1173                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
   1174
   1175    /* TLB Miss.  */
   1176
   1177    param = TCG_REG_O1;
   1178    if (!SPARC64 && TARGET_LONG_BITS == 64) {
   1179        /* Skip the high-part; we'll perform the extract in the trampoline.  */
   1180        param++;
   1181    }
   1182    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);
   1183
   1184    /* We use the helpers to extend SB and SW data, leaving the case
   1185       of SL needing explicit extending below.  */
   1186    if ((memop & MO_SSIZE) == MO_SL) {
   1187        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
   1188    } else {
   1189        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
   1190    }
   1191    tcg_debug_assert(func != NULL);
   1192    tcg_out_call_nodelay(s, func, false);
   1193    /* delay slot */
   1194    tcg_out_movi(s, TCG_TYPE_I32, param, oi);
   1195
    1196    /* Recall that all of the helpers return 64-bit results,
    1197       which complicates things for sparcv8plus.  */
   1198    if (SPARC64) {
   1199        /* We let the helper sign-extend SB and SW, but leave SL for here.  */
   1200        if (is_64 && (memop & MO_SSIZE) == MO_SL) {
   1201            tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
   1202        } else {
   1203            tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
   1204        }
   1205    } else {
   1206        if ((memop & MO_SIZE) == MO_64) {
   1207            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
   1208            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
   1209            tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
   1210        } else if (is_64) {
   1211            /* Re-extend from 32-bit rather than reassembling when we
   1212               know the high register must be an extension.  */
   1213            tcg_out_arithi(s, data, TCG_REG_O1, 0,
   1214                           memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
   1215        } else {
   1216            tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
   1217        }
   1218    }
   1219
   1220    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
   1221#else
   1222    if (SPARC64 && TARGET_LONG_BITS == 32) {
   1223        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
   1224        addr = TCG_REG_T1;
   1225    }
   1226    tcg_out_ldst_rr(s, data, addr,
   1227                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
   1228                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
   1229#endif /* CONFIG_SOFTMMU */
   1230}
   1231
   1232static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
   1233                            MemOpIdx oi)
   1234{
   1235    MemOp memop = get_memop(oi);
   1236#ifdef CONFIG_SOFTMMU
   1237    unsigned memi = get_mmuidx(oi);
   1238    TCGReg addrz, param;
   1239    const tcg_insn_unit *func;
   1240    tcg_insn_unit *label_ptr;
   1241
   1242    addrz = tcg_out_tlb_load(s, addr, memi, memop,
   1243                             offsetof(CPUTLBEntry, addr_write));
   1244
   1245    /* The fast path is exactly one insn.  Thus we can perform the entire
   1246       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
   1247    /* beq,a,pt %[xi]cc, label0 */
   1248    label_ptr = s->code_ptr;
   1249    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
   1250                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
   1251    /* delay slot */
   1252    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
   1253                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
   1254
   1255    /* TLB Miss.  */
   1256
   1257    param = TCG_REG_O1;
   1258    if (!SPARC64 && TARGET_LONG_BITS == 64) {
   1259        /* Skip the high-part; we'll perform the extract in the trampoline.  */
   1260        param++;
   1261    }
   1262    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);
   1263    if (!SPARC64 && (memop & MO_SIZE) == MO_64) {
   1264        /* Skip the high-part; we'll perform the extract in the trampoline.  */
   1265        param++;
   1266    }
   1267    tcg_out_mov(s, TCG_TYPE_REG, param++, data);
   1268
   1269    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
   1270    tcg_debug_assert(func != NULL);
   1271    tcg_out_call_nodelay(s, func, false);
   1272    /* delay slot */
   1273    tcg_out_movi(s, TCG_TYPE_I32, param, oi);
   1274
   1275    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
   1276#else
   1277    if (SPARC64 && TARGET_LONG_BITS == 32) {
   1278        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
   1279        addr = TCG_REG_T1;
   1280    }
   1281    tcg_out_ldst_rr(s, data, addr,
   1282                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
   1283                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
   1284#endif /* CONFIG_SOFTMMU */
   1285}
   1286
   1287static void tcg_out_op(TCGContext *s, TCGOpcode opc,
   1288                       const TCGArg args[TCG_MAX_OP_ARGS],
   1289                       const int const_args[TCG_MAX_OP_ARGS])
   1290{
   1291    TCGArg a0, a1, a2;
   1292    int c, c2;
   1293
   1294    /* Hoist the loads of the most common arguments.  */
   1295    a0 = args[0];
   1296    a1 = args[1];
   1297    a2 = args[2];
   1298    c2 = const_args[2];
   1299
   1300    switch (opc) {
   1301    case INDEX_op_exit_tb:
   1302        if (check_fit_ptr(a0, 13)) {
   1303            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
   1304            tcg_out_movi_imm13(s, TCG_REG_O0, a0);
   1305            break;
   1306        } else if (USE_REG_TB) {
   1307            intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
   1308            if (check_fit_ptr(tb_diff, 13)) {
   1309                tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
   1310                /* Note that TCG_REG_TB has been unwound to O1.  */
   1311                tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
   1312                break;
   1313            }
   1314        }
   1315        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
   1316        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
   1317        tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
   1318        break;
   1319    case INDEX_op_goto_tb:
   1320        if (s->tb_jmp_insn_offset) {
   1321            /* direct jump method */
   1322            if (USE_REG_TB) {
   1323                /* make sure the patch is 8-byte aligned.  */
   1324                if ((intptr_t)s->code_ptr & 4) {
   1325                    tcg_out_nop(s);
   1326                }
   1327                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
   1328                tcg_out_sethi(s, TCG_REG_T1, 0);
   1329                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
   1330                tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
   1331                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
   1332            } else {
   1333                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
   1334                tcg_out32(s, CALL);
   1335                tcg_out_nop(s);
   1336            }
   1337        } else {
   1338            /* indirect jump method */
   1339            tcg_out_ld_ptr(s, TCG_REG_TB, s->tb_jmp_target_addr + a0);
   1340            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
   1341            tcg_out_nop(s);
   1342        }
   1343        set_jmp_reset_offset(s, a0);
   1344
   1345        /* For the unlinked path of goto_tb, we need to reset
   1346           TCG_REG_TB to the beginning of this TB.  */
   1347        if (USE_REG_TB) {
   1348            c = -tcg_current_code_size(s);
   1349            if (check_fit_i32(c, 13)) {
   1350                tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
   1351            } else {
   1352                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
   1353                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
   1354                              TCG_REG_T1, ARITH_ADD);
   1355            }
   1356        }
   1357        break;
   1358    case INDEX_op_goto_ptr:
   1359        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
   1360        if (USE_REG_TB) {
   1361            tcg_out_mov_delay(s, TCG_REG_TB, a0);
   1362        } else {
   1363            tcg_out_nop(s);
   1364        }
   1365        break;
   1366    case INDEX_op_br:
   1367        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
   1368        tcg_out_nop(s);
   1369        break;
   1370
   1371#define OP_32_64(x)                             \
   1372        glue(glue(case INDEX_op_, x), _i32):    \
   1373        glue(glue(case INDEX_op_, x), _i64)
   1374
   1375    OP_32_64(ld8u):
   1376        tcg_out_ldst(s, a0, a1, a2, LDUB);
   1377        break;
   1378    OP_32_64(ld8s):
   1379        tcg_out_ldst(s, a0, a1, a2, LDSB);
   1380        break;
   1381    OP_32_64(ld16u):
   1382        tcg_out_ldst(s, a0, a1, a2, LDUH);
   1383        break;
   1384    OP_32_64(ld16s):
   1385        tcg_out_ldst(s, a0, a1, a2, LDSH);
   1386        break;
   1387    case INDEX_op_ld_i32:
   1388    case INDEX_op_ld32u_i64:
   1389        tcg_out_ldst(s, a0, a1, a2, LDUW);
   1390        break;
   1391    OP_32_64(st8):
   1392        tcg_out_ldst(s, a0, a1, a2, STB);
   1393        break;
   1394    OP_32_64(st16):
   1395        tcg_out_ldst(s, a0, a1, a2, STH);
   1396        break;
   1397    case INDEX_op_st_i32:
   1398    case INDEX_op_st32_i64:
   1399        tcg_out_ldst(s, a0, a1, a2, STW);
   1400        break;
   1401    OP_32_64(add):
   1402        c = ARITH_ADD;
   1403        goto gen_arith;
   1404    OP_32_64(sub):
   1405        c = ARITH_SUB;
   1406        goto gen_arith;
   1407    OP_32_64(and):
   1408        c = ARITH_AND;
   1409        goto gen_arith;
   1410    OP_32_64(andc):
   1411        c = ARITH_ANDN;
   1412        goto gen_arith;
   1413    OP_32_64(or):
   1414        c = ARITH_OR;
   1415        goto gen_arith;
   1416    OP_32_64(orc):
   1417        c = ARITH_ORN;
   1418        goto gen_arith;
   1419    OP_32_64(xor):
   1420        c = ARITH_XOR;
   1421        goto gen_arith;
   1422    case INDEX_op_shl_i32:
   1423        c = SHIFT_SLL;
   1424    do_shift32:
   1425        /* Limit immediate shift count lest we create an illegal insn.  */
   1426        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
   1427        break;
   1428    case INDEX_op_shr_i32:
   1429        c = SHIFT_SRL;
   1430        goto do_shift32;
   1431    case INDEX_op_sar_i32:
   1432        c = SHIFT_SRA;
   1433        goto do_shift32;
   1434    case INDEX_op_mul_i32:
   1435        c = ARITH_UMUL;
   1436        goto gen_arith;
   1437
   1438    OP_32_64(neg):
    1439        c = ARITH_SUB;
    1440        goto gen_arith1;
    1441    OP_32_64(not):
    1442        c = ARITH_ORN;
    1443        goto gen_arith1;
   1444
   1445    case INDEX_op_div_i32:
   1446        tcg_out_div32(s, a0, a1, a2, c2, 0);
   1447        break;
   1448    case INDEX_op_divu_i32:
   1449        tcg_out_div32(s, a0, a1, a2, c2, 1);
   1450        break;
   1451
   1452    case INDEX_op_brcond_i32:
   1453        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
   1454        break;
   1455    case INDEX_op_setcond_i32:
   1456        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
   1457        break;
   1458    case INDEX_op_movcond_i32:
   1459        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
   1460        break;
   1461
   1462    case INDEX_op_add2_i32:
   1463        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
   1464                            args[4], const_args[4], args[5], const_args[5],
   1465                            ARITH_ADDCC, ARITH_ADDC);
   1466        break;
   1467    case INDEX_op_sub2_i32:
   1468        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
   1469                            args[4], const_args[4], args[5], const_args[5],
   1470                            ARITH_SUBCC, ARITH_SUBC);
   1471        break;
   1472    case INDEX_op_mulu2_i32:
   1473        c = ARITH_UMUL;
   1474        goto do_mul2;
   1475    case INDEX_op_muls2_i32:
   1476        c = ARITH_SMUL;
   1477    do_mul2:
   1478        /* The 32-bit multiply insns produce a full 64-bit result.  If the
   1479           destination register can hold it, we can avoid the slower RDY.  */
   1480        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
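                /* On SPARC64 any register holds the full 64-bit product; on
                   sparcv8plus only the %g and %o registers (up to %o7) do,
                   and only then can the high half be taken with a 64-bit
                   shift rather than the slower read of %y.  */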
   1481        if (SPARC64 || a0 <= TCG_REG_O7) {
   1482            tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
   1483        } else {
   1484            tcg_out_rdy(s, a1);
   1485        }
   1486        break;
   1487
   1488    case INDEX_op_qemu_ld_i32:
   1489        tcg_out_qemu_ld(s, a0, a1, a2, false);
   1490        break;
   1491    case INDEX_op_qemu_ld_i64:
   1492        tcg_out_qemu_ld(s, a0, a1, a2, true);
   1493        break;
   1494    case INDEX_op_qemu_st_i32:
   1495    case INDEX_op_qemu_st_i64:
   1496        tcg_out_qemu_st(s, a0, a1, a2);
   1497        break;
   1498
   1499    case INDEX_op_ld32s_i64:
   1500        tcg_out_ldst(s, a0, a1, a2, LDSW);
   1501        break;
   1502    case INDEX_op_ld_i64:
   1503        tcg_out_ldst(s, a0, a1, a2, LDX);
   1504        break;
   1505    case INDEX_op_st_i64:
   1506        tcg_out_ldst(s, a0, a1, a2, STX);
   1507        break;
   1508    case INDEX_op_shl_i64:
   1509        c = SHIFT_SLLX;
   1510    do_shift64:
   1511        /* Limit immediate shift count lest we create an illegal insn.  */
   1512        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
   1513        break;
   1514    case INDEX_op_shr_i64:
   1515        c = SHIFT_SRLX;
   1516        goto do_shift64;
   1517    case INDEX_op_sar_i64:
   1518        c = SHIFT_SRAX;
   1519        goto do_shift64;
   1520    case INDEX_op_mul_i64:
   1521        c = ARITH_MULX;
   1522        goto gen_arith;
   1523    case INDEX_op_div_i64:
   1524        c = ARITH_SDIVX;
   1525        goto gen_arith;
   1526    case INDEX_op_divu_i64:
   1527        c = ARITH_UDIVX;
   1528        goto gen_arith;
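            /* A 32-bit SRA or SRL with a shift count of zero sign- or
               zero-extends the low 32 bits of the source into the 64-bit
               destination, which is exactly the extension required here.  */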
   1529    case INDEX_op_ext_i32_i64:
   1530    case INDEX_op_ext32s_i64:
   1531        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
   1532        break;
   1533    case INDEX_op_extu_i32_i64:
   1534    case INDEX_op_ext32u_i64:
   1535        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
   1536        break;
   1537    case INDEX_op_extrl_i64_i32:
   1538        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
   1539        break;
   1540    case INDEX_op_extrh_i64_i32:
   1541        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
   1542        break;
   1543
   1544    case INDEX_op_brcond_i64:
   1545        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
   1546        break;
   1547    case INDEX_op_setcond_i64:
   1548        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
   1549        break;
   1550    case INDEX_op_movcond_i64:
   1551        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
   1552        break;
   1553    case INDEX_op_add2_i64:
   1554        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
   1555                            const_args[4], args[5], const_args[5], false);
   1556        break;
   1557    case INDEX_op_sub2_i64:
   1558        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
   1559                            const_args[4], args[5], const_args[5], true);
   1560        break;
   1561    case INDEX_op_muluh_i64:
   1562        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
   1563        break;
   1564
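            /* Shared tails: gen_arith emits a three-operand arithmetic insn
               (a0 = a1 op a2, with a2 possibly an immediate); gen_arith1
               forms the unary neg/not above as a0 = %g0 op a1.  */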
   1565    gen_arith:
   1566        tcg_out_arithc(s, a0, a1, a2, c2, c);
   1567        break;
   1568
   1569    gen_arith1:
    1570        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
    1571        break;
   1572
   1573    case INDEX_op_mb:
   1574        tcg_out_mb(s, a0);
   1575        break;
   1576
   1577    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
   1578    case INDEX_op_mov_i64:
   1579    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
   1580    default:
   1581        tcg_abort();
   1582    }
   1583}
   1584
   1585static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
   1586{
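            /* Constraint letters used below: 'r' denotes the 32-bit-safe
               register set and 'R' the registers that can hold full 64-bit
               values (ALL_GENERAL_REGS vs ALL_GENERAL_REGS64); 'Z' also
               accepts constant zero, 'J' a signed 13-bit immediate, 'I' a
               signed 11-bit immediate; 'A', 's' and 'S' are the qemu_ld/st
               address and store-data sets, avoiding the registers reserved
               for the softmmu tlb lookup and helper call.  */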
   1587    switch (op) {
   1588    case INDEX_op_goto_ptr:
   1589        return C_O0_I1(r);
   1590
   1591    case INDEX_op_ld8u_i32:
   1592    case INDEX_op_ld8s_i32:
   1593    case INDEX_op_ld16u_i32:
   1594    case INDEX_op_ld16s_i32:
   1595    case INDEX_op_ld_i32:
   1596    case INDEX_op_neg_i32:
   1597    case INDEX_op_not_i32:
   1598        return C_O1_I1(r, r);
   1599
   1600    case INDEX_op_st8_i32:
   1601    case INDEX_op_st16_i32:
   1602    case INDEX_op_st_i32:
   1603        return C_O0_I2(rZ, r);
   1604
   1605    case INDEX_op_add_i32:
   1606    case INDEX_op_mul_i32:
   1607    case INDEX_op_div_i32:
   1608    case INDEX_op_divu_i32:
   1609    case INDEX_op_sub_i32:
   1610    case INDEX_op_and_i32:
   1611    case INDEX_op_andc_i32:
   1612    case INDEX_op_or_i32:
   1613    case INDEX_op_orc_i32:
   1614    case INDEX_op_xor_i32:
   1615    case INDEX_op_shl_i32:
   1616    case INDEX_op_shr_i32:
   1617    case INDEX_op_sar_i32:
   1618    case INDEX_op_setcond_i32:
   1619        return C_O1_I2(r, rZ, rJ);
   1620
   1621    case INDEX_op_brcond_i32:
   1622        return C_O0_I2(rZ, rJ);
   1623    case INDEX_op_movcond_i32:
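                /* The trailing "0" ties the fourth input to output operand 0:
                   the conditional move only overwrites the destination when
                   the condition holds, so it must already hold that value.  */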
   1624        return C_O1_I4(r, rZ, rJ, rI, 0);
   1625    case INDEX_op_add2_i32:
   1626    case INDEX_op_sub2_i32:
   1627        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
   1628    case INDEX_op_mulu2_i32:
   1629    case INDEX_op_muls2_i32:
   1630        return C_O2_I2(r, r, rZ, rJ);
   1631
   1632    case INDEX_op_ld8u_i64:
   1633    case INDEX_op_ld8s_i64:
   1634    case INDEX_op_ld16u_i64:
   1635    case INDEX_op_ld16s_i64:
   1636    case INDEX_op_ld32u_i64:
   1637    case INDEX_op_ld32s_i64:
   1638    case INDEX_op_ld_i64:
   1639    case INDEX_op_ext_i32_i64:
   1640    case INDEX_op_extu_i32_i64:
   1641        return C_O1_I1(R, r);
   1642
   1643    case INDEX_op_st8_i64:
   1644    case INDEX_op_st16_i64:
   1645    case INDEX_op_st32_i64:
   1646    case INDEX_op_st_i64:
   1647        return C_O0_I2(RZ, r);
   1648
   1649    case INDEX_op_add_i64:
   1650    case INDEX_op_mul_i64:
   1651    case INDEX_op_div_i64:
   1652    case INDEX_op_divu_i64:
   1653    case INDEX_op_sub_i64:
   1654    case INDEX_op_and_i64:
   1655    case INDEX_op_andc_i64:
   1656    case INDEX_op_or_i64:
   1657    case INDEX_op_orc_i64:
   1658    case INDEX_op_xor_i64:
   1659    case INDEX_op_shl_i64:
   1660    case INDEX_op_shr_i64:
   1661    case INDEX_op_sar_i64:
   1662    case INDEX_op_setcond_i64:
   1663        return C_O1_I2(R, RZ, RJ);
   1664
   1665    case INDEX_op_neg_i64:
   1666    case INDEX_op_not_i64:
   1667    case INDEX_op_ext32s_i64:
   1668    case INDEX_op_ext32u_i64:
   1669        return C_O1_I1(R, R);
   1670
   1671    case INDEX_op_extrl_i64_i32:
   1672    case INDEX_op_extrh_i64_i32:
   1673        return C_O1_I1(r, R);
   1674
   1675    case INDEX_op_brcond_i64:
   1676        return C_O0_I2(RZ, RJ);
   1677    case INDEX_op_movcond_i64:
   1678        return C_O1_I4(R, RZ, RJ, RI, 0);
   1679    case INDEX_op_add2_i64:
   1680    case INDEX_op_sub2_i64:
   1681        return C_O2_I4(R, R, RZ, RZ, RJ, RI);
   1682    case INDEX_op_muluh_i64:
   1683        return C_O1_I2(R, R, R);
   1684
   1685    case INDEX_op_qemu_ld_i32:
   1686        return C_O1_I1(r, A);
   1687    case INDEX_op_qemu_ld_i64:
   1688        return C_O1_I1(R, A);
   1689    case INDEX_op_qemu_st_i32:
   1690        return C_O0_I2(sZ, A);
   1691    case INDEX_op_qemu_st_i64:
   1692        return C_O0_I2(SZ, A);
   1693
   1694    default:
   1695        g_assert_not_reached();
   1696    }
   1697}
   1698
   1699static void tcg_target_init(TCGContext *s)
   1700{
   1701    /*
   1702     * Only probe for the platform and capabilities if we haven't already
   1703     * determined maximum values at compile time.
   1704     */
   1705#ifndef use_vis3_instructions
   1706    {
   1707        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
   1708        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
   1709    }
   1710#endif
   1711
   1712    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
   1713    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS64;
   1714
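            /*
             * Per the SPARC calling convention, all %g and %o registers are
             * call-clobbered; the %l and %i registers are preserved across
             * calls by the register window mechanism.
             */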
   1715    tcg_target_call_clobber_regs = 0;
   1716    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
   1717    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
   1718    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
   1719    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
   1720    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
   1721    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
   1722    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
   1723    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
   1724    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
   1725    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
   1726    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
   1727    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
   1728    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
   1729    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
   1730    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);
   1731
   1732    s->reserved_regs = 0;
   1733    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
   1734    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
   1735    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
   1736    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
   1737    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
   1738    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
   1739    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
   1740    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
   1741}
   1742
   1743#if SPARC64
   1744# define ELF_HOST_MACHINE  EM_SPARCV9
   1745#else
   1746# define ELF_HOST_MACHINE  EM_SPARC32PLUS
   1747# define ELF_HOST_FLAGS    EF_SPARC_32PLUS
   1748#endif
   1749
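        /*
         * Minimal DWARF call-frame information describing the generated
         * code's frame, registered with the debugger via tcg_register_jit()
         * below so that host-level backtraces can unwind through it.
         */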
   1750typedef struct {
   1751    DebugFrameHeader h;
   1752    uint8_t fde_def_cfa[SPARC64 ? 4 : 2];
   1753    uint8_t fde_win_save;
   1754    uint8_t fde_ret_save[3];
   1755} DebugFrame;
   1756
   1757static const DebugFrame debug_frame = {
   1758    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
   1759    .h.cie.id = -1,
   1760    .h.cie.version = 1,
   1761    .h.cie.code_align = 1,
   1762    .h.cie.data_align = -sizeof(void *) & 0x7f,
   1763    .h.cie.return_column = 15,            /* o7 */
   1764
   1765    /* Total FDE size does not include the "len" member.  */
   1766    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
   1767
   1768    .fde_def_cfa = {
   1769#if SPARC64
   1770        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
   1771        (2047 & 0x7f) | 0x80, (2047 >> 7)
   1772#else
   1773        13, 30                          /* DW_CFA_def_cfa_register i6 */
   1774#endif
   1775    },
   1776    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
   1777    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
   1778};
   1779
   1780void tcg_register_jit(const void *buf, size_t buf_size)
   1781{
   1782    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
   1783}
   1784
   1785void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
   1786                              uintptr_t jmp_rw, uintptr_t addr)
   1787{
   1788    intptr_t tb_disp = addr - tc_ptr;
   1789    intptr_t br_disp = addr - jmp_rx;
   1790    tcg_insn_unit i1, i2;
   1791
   1792    /* We can reach the entire address space for ILP32.
   1793       For LP64, the code_gen_buffer can't be larger than 2GB.  */
   1794    tcg_debug_assert(tb_disp == (int32_t)tb_disp);
   1795    tcg_debug_assert(br_disp == (int32_t)br_disp);
   1796
   1797    if (!USE_REG_TB) {
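                /* Without TCG_REG_TB the jump slot is a single CALL insn;
                   patch its 30-bit word displacement with one atomic 32-bit
                   store so a concurrently executing thread always sees a
                   complete instruction.  */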
   1798        qatomic_set((uint32_t *)jmp_rw,
    1799                    deposit32(CALL, 0, 30, br_disp >> 2));
   1800        flush_idcache_range(jmp_rx, jmp_rw, 4);
   1801        return;
   1802    }
   1803
    1804    /* A displacement this small does not exercise the range of the
    1805       branch, but we still need to load the new value of TCG_REG_TB,
    1806       and this case happens quite often.  */
   1807    if (check_fit_ptr(tb_disp, 13)) {
   1808        /* ba,pt %icc, addr */
   1809        i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
   1810              | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp));
   1811        i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB)
   1812              | INSN_IMM13(tb_disp));
   1813    } else if (tb_disp >= 0) {
   1814        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10);
   1815        i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
   1816              | INSN_IMM13(tb_disp & 0x3ff));
   1817    } else {
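                /* Negative displacement: sethi loads the complemented high
                   bits into TCG_REG_T1, and xor with a sign-extended 13-bit
                   immediate (low 10 bits of tb_disp, all ones above) flips
                   them back, yielding the sign-extended negative value.  */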
   1818        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10);
   1819        i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
   1820              | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
   1821    }
   1822
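            /* Patch both insns with a single aligned 64-bit store.  SPARC is
               big-endian, so i1 (placed in the high 32 bits) lands at the
               lower address and executes first.  */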
   1823    qatomic_set((uint64_t *)jmp_rw, deposit64(i2, 32, 32, i1));
   1824    flush_idcache_range(jmp_rx, jmp_rw, 8);
   1825}