cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu
Log | Files | Refs | Submodules | LICENSE | sfeed.txt

translate.c (179747B)


      1/*
      2 *  S/390 translation
      3 *
      4 *  Copyright (c) 2009 Ulrich Hecht
      5 *  Copyright (c) 2010 Alexander Graf
      6 *
      7 * This library is free software; you can redistribute it and/or
      8 * modify it under the terms of the GNU Lesser General Public
      9 * License as published by the Free Software Foundation; either
     10 * version 2.1 of the License, or (at your option) any later version.
     11 *
     12 * This library is distributed in the hope that it will be useful,
     13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
     14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     15 * Lesser General Public License for more details.
     16 *
     17 * You should have received a copy of the GNU Lesser General Public
     18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
     19 */
     20
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

/* LOG_DISAS() compiles to nothing unless verbose disas debugging is on. */
#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
     30
     31#include "qemu/osdep.h"
     32#include "cpu.h"
     33#include "s390x-internal.h"
     34#include "disas/disas.h"
     35#include "exec/exec-all.h"
     36#include "tcg/tcg-op.h"
     37#include "tcg/tcg-op-gvec.h"
     38#include "qemu/log.h"
     39#include "qemu/host-utils.h"
     40#include "exec/cpu_ldst.h"
     41#include "exec/gen-icount.h"
     42#include "exec/helper-proto.h"
     43#include "exec/helper-gen.h"
     44
     45#include "exec/translator.h"
     46#include "exec/log.h"
     47#include "qemu/atomic128.h"
     48
     49
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

/* "Original" field indices: one enumerator per architecturally distinct
   instruction-format field (registers, masks, bases, displacements, ...). */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};
     91
/* Compact field indices: fields that can never appear in the same
   instruction format share a storage slot in DisasFields::c[].  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
    128
/* Decoded form of one instruction. */
struct DisasFields {
    uint64_t raw_insn;      /* the raw instruction bits */
    unsigned op:8;          /* primary opcode */
    unsigned op2:8;         /* secondary opcode, if the format has one */
    unsigned presentC:16;   /* availability bitmap over compact (C) indices */
    unsigned int presentO;  /* availability bitmap over original (O) indices */
    int c[NUM_C_FIELD];     /* decoded field values, compactly indexed */
};
    137
/* Per-translation-block state threaded through the decoder. */
struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;   /* descriptor of the insn being translated */
    TCGOp *insn_start;
    DisasFields fields;      /* decoded fields of the current insn */
    uint64_t ex_value;       /* NOTE(review): presumably the insn image
                                supplied by EXECUTE -- confirm upstream */
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;           /* current insn length in bytes; stored into
                                env->int_pgm_ilen on program exceptions */
    enum cc_op cc_op;        /* how the condition code is currently derived */
    bool do_debug;
};
    154
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;     /* true: operands live in u.s64, else in u.s32 */
    bool g1;        /* operand a is a TCG global -- do not free it */
    bool g2;        /* operand b is a TCG global -- do not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

/* Hit/miss counters for inlined condition-code branches (debug only). */
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
    171
/*
 * Store into OUT the link information for the instruction at PC (as saved
 * by branch-and-link style instructions).  In 64-bit mode the full address
 * is written.  In 31-bit mode, bit 2^31 is or'ed into the address
 * (presumably the addressing-mode bit -- confirm against the PoP) and only
 * the low 32 bits of OUT are replaced; in 24-bit mode the low half is
 * likewise deposited with the high half of OUT preserved.
 */
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            /* 64-bit mode: the address alone is the link information. */
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    /* 24/31-bit: replace only the low word, keeping OUT's high half. */
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
    188
/* TCG globals mirroring architectural state in CPUS390XState. */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;       /* breaking-event address (see per_* helpers) */

/* Condition-code machinery: operation selector plus up to three inputs. */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];   /* "r0".."r15"; backing for regs[] names */
static TCGv_i64 regs[16];
    200
/*
 * Create the TCG globals that alias architectural state in CPUS390XState:
 * PSW address and mask, the breaking-event address, the condition-code
 * fields, and the 16 general registers.  Run once at translator setup.
 */
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        /* Names live in static storage; presumably TCG keeps the pointer. */
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
    231
    232static inline int vec_full_reg_offset(uint8_t reg)
    233{
    234    g_assert(reg < 32);
    235    return offsetof(CPUS390XState, vregs[reg][0]);
    236}
    237
/* Byte offset of element ENR (of size ES) of vector register REG. */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    /* Flip element position within its 8-byte half, per the table above. */
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
    272
    273static inline int freg64_offset(uint8_t reg)
    274{
    275    g_assert(reg < 16);
    276    return vec_reg_offset(reg, 0, MO_64);
    277}
    278
    279static inline int freg32_offset(uint8_t reg)
    280{
    281    g_assert(reg < 16);
    282    return vec_reg_offset(reg, 0, MO_32);
    283}
    284
    285static TCGv_i64 load_reg(int reg)
    286{
    287    TCGv_i64 r = tcg_temp_new_i64();
    288    tcg_gen_mov_i64(r, regs[reg]);
    289    return r;
    290}
    291
    292static TCGv_i64 load_freg(int reg)
    293{
    294    TCGv_i64 r = tcg_temp_new_i64();
    295
    296    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    297    return r;
    298}
    299
    300static TCGv_i64 load_freg32_i64(int reg)
    301{
    302    TCGv_i64 r = tcg_temp_new_i64();
    303
    304    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    305    return r;
    306}
    307
    308static void store_reg(int reg, TCGv_i64 v)
    309{
    310    tcg_gen_mov_i64(regs[reg], v);
    311}
    312
    313static void store_freg(int reg, TCGv_i64 v)
    314{
    315    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
    316}
    317
    318static void store_reg32_i64(int reg, TCGv_i64 v)
    319{
    320    /* 32 bit register writes keep the upper half */
    321    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
    322}
    323
    324static void store_reg32h_i64(int reg, TCGv_i64 v)
    325{
    326    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
    327}
    328
    329static void store_freg32_i64(int reg, TCGv_i64 v)
    330{
    331    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
    332}
    333
    334static void return_low128(TCGv_i64 dest)
    335{
    336    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
    337}
    338
    339static void update_psw_addr(DisasContext *s)
    340{
    341    /* psw.addr */
    342    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
    343}
    344
/*
 * Emit a PER branch-event check for the current instruction.  GBEA is
 * updated to the current PC; if PER tracing is enabled for this TB, the
 * per_branch helper is called with the branch target: the next sequential
 * instruction (s->pc_tmp) when TO_NEXT, otherwise whatever psw_addr
 * currently holds.  Compiled out entirely for user-only builds.
 */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            /* Only the locally allocated constant may be freed;
               psw_addr is a global. */
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}
    359
/*
 * Conditional variant of per_branch: the branch event fires only when
 * COND holds for ARG1 <cond> ARG2.  With PER enabled, a branch skips the
 * helper call when the condition is false; without PER, only GBEA is
 * updated, using movcond so it changes exactly when the branch is taken.
 * Compiled out for user-only builds.
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Skip the helper when the branch condition does NOT hold. */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}
    379
    380static void per_breaking_event(DisasContext *s)
    381{
    382    tcg_gen_movi_i64(gbea, s->base.pc_next);
    383}
    384
    385static void update_cc_op(DisasContext *s)
    386{
    387    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
    388        tcg_gen_movi_i32(cc_op, s->cc_op);
    389    }
    390}
    391
    392static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
    393                                uint64_t pc)
    394{
    395    return (uint64_t)translator_lduw(env, &s->base, pc);
    396}
    397
    398static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
    399                                uint64_t pc)
    400{
    401    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
    402}
    403
    404static int get_mem_index(DisasContext *s)
    405{
    406#ifdef CONFIG_USER_ONLY
    407    return MMU_USER_IDX;
    408#else
    409    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
    410        return MMU_REAL_IDX;
    411    }
    412
    413    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    414    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
    415        return MMU_PRIMARY_IDX;
    416    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
    417        return MMU_SECONDARY_IDX;
    418    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
    419        return MMU_HOME_IDX;
    420    default:
    421        tcg_abort();
    422        break;
    423    }
    424#endif
    425}
    426
    427static void gen_exception(int excp)
    428{
    429    TCGv_i32 tmp = tcg_const_i32(excp);
    430    gen_helper_exception(cpu_env, tmp);
    431    tcg_temp_free_i32(tmp);
    432}
    433
/*
 * Raise a program interrupt with interruption code CODE.  Stores the code
 * and current instruction length into env, synchronizes psw.addr and the
 * condition code, then raises EXCP_PGM.  NOTE(review): the exception
 * helper presumably does not return to generated code -- confirm.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exeption this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
    456
    457static inline void gen_illegal_opcode(DisasContext *s)
    458{
    459    gen_program_exception(s, PGM_OPERATION);
    460}
    461
    462static inline void gen_data_exception(uint8_t dxc)
    463{
    464    TCGv_i32 tmp = tcg_const_i32(dxc);
    465    gen_helper_data_exception(cpu_env, tmp);
    466    tcg_temp_free_i32(tmp);
    467}
    468
    469static inline void gen_trap(DisasContext *s)
    470{
    471    /* Set DXC to 0xff */
    472    gen_data_exception(0xff);
    473}
    474
/*
 * DST = SRC + IMM, wrapped to the current addressing mode: the sum is
 * masked to 31 bits in 31-bit mode, to 24 bits in 24-bit mode, and left
 * untouched in 64-bit mode.  The add must happen before the mask so the
 * wrap applies to the final address.
 */
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}
    487
/*
 * Compute the effective address d2(x2,b2) into a new temporary, wrapped
 * to the current addressing mode.  A base or index register number of 0
 * means that register does not participate in address generation.
 */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        /* Pure displacement: apply the 24/31-bit mask at translate time. */
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
    515
    516static inline bool live_cc_data(DisasContext *s)
    517{
    518    return (s->cc_op != CC_OP_DYNAMIC
    519            && s->cc_op != CC_OP_STATIC
    520            && s->cc_op > 3);
    521}
    522
/*
 * Set the condition code to the constant VAL (0..3), tracked as
 * CC_OP_CONST0+val in s->cc_op; no TCG store is emitted here.  Stale cc
 * operand temporaries are discarded so the optimizer can drop them.
 */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
    532
/* Track a one-operand cc computation: cc_dst = DST, method = OP.
   Unused cc inputs are discarded. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
    542
/* Track a two-operand cc computation: cc_src = SRC, cc_dst = DST,
   method = OP.  The unused cc_vr is discarded. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
    553
/* Track a three-operand cc computation: cc_src/cc_dst/cc_vr = SRC/DST/VR,
   method = OP.  All three inputs are (re)written, so nothing to discard. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
    562
    563static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
    564{
    565    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
    566}
    567
/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    /* The materialized value in cc_op is now authoritative; any tracked
       operand temporaries are stale and can be discarded. */
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
    578
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    /*
     * First pass: materialize the helper arguments this cc method needs --
     * a constant cc_op for helper-computed cases, and a dummy zero to fill
     * unused operand slots.  The constant cases need neither.
     */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    /* Second pass: compute the cc value into the cc_op global. */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
    674
    675static bool use_goto_tb(DisasContext *s, uint64_t dest)
    676{
    677    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
    678        return false;
    679    }
    680    return translator_use_goto_tb(&s->base, dest);
    681}
    682
    683static void account_noninline_branch(DisasContext *s, int cc_op)
    684{
    685#ifdef DEBUG_INLINE_BRANCHES
    686    inline_branch_miss[cc_op]++;
    687#endif
    688}
    689
    690static void account_inline_branch(DisasContext *s, int cc_op)
    691{
    692#ifdef DEBUG_INLINE_BRANCHES
    693    inline_branch_hit[cc_op]++;
    694#endif
    695}
    696
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Entries come in pairs because
   the low mask bit (CC=3) is a don't-care here.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
    709
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible; the two low mask bits
   (CC=2/CC=3) are don't-cares, hence the groups of four.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
    722
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  The g1/g2 flags mark
   operands that alias TCG globals and must not be freed by free_compare.
   If no inline condition exists for the mask, the cc value is computed
   into cc_op and the comparison is performed against that.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Mask of all ones / all zeroes: branch always / never taken. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (cc_src & cc_dst) against zero. */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is materialized in cc_op: derive a direct
           comparison against it for each useful mask pattern. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
    985
    986static void free_compare(DisasCompare *c)
    987{
    988    if (!c->g1) {
    989        if (c->is_64) {
    990            tcg_temp_free_i64(c->u.s64.a);
    991        } else {
    992            tcg_temp_free_i32(c->u.s32.a);
    993        }
    994    }
    995    if (!c->g2) {
    996        if (c->is_64) {
    997            tcg_temp_free_i64(c->u.s64.b);
    998        } else {
    999            tcg_temp_free_i32(c->u.s32.b);
   1000        }
   1001    }
   1002}
   1003
   1004/* ====================================================================== */
   1005/* Define the insn format enumeration.  */
/* Build the DisasFormat enumeration from insn-format.def: every
   Fn(NAME, ...) entry expands to a FMT_NAME enumerator, whatever
   its arity. */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
   1025
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* Test whether operand field 'c' was present in the decoded insn:
   each bit of presentO records one FLD_O_* field. */
static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

/* Fetch the decoded value of an operand field; asserting that the
   field was actually present in this instruction format. */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
   1041
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;     /* first bit of the field within the insn */
    unsigned int size:8;    /* field width in bits */
    unsigned int type:2;    /* as used by the extractor macros below:
                               0 plain, 1 immediate, 2 long displacement,
                               3 vector register */
    unsigned int indexC:6;  /* FLD_C_* storage slot in DisasFields.c[] */
    enum DisasFieldIndexO indexO:8;  /* FLD_O_* presence-bit identifier */
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];     /* field extractors for one format */
} DisasFormatInfo;
   1054
/* Per-operand field layouts: each initializer is
   { first bit, width, type, FLD_C_* slot, FLD_O_* presence bit }. */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

/* Expand insn-format.def a second time to build the field tables. */
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
   1099
/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    /* g_* flag the matching TCGv below as a global value that must not
       be freed or clobbered (see e.g. the assert in op_andi). */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;   /* operand values */
    TCGv_i64 addr1;                 /* computed effective address, if any */
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

/* Decode-table entry describing one instruction. */
struct DisasInsn {
    unsigned opc:16;            /* opcode */
    unsigned flags:16;          /* IF_* flags above */
    DisasFormat fmt:8;          /* instruction format (FMT_*) */
    unsigned fac:8;
    unsigned spec:8;            /* SPEC_* operand constraints */

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    /* Opaque per-insn datum consumed by the helpers
       (e.g. shift/size packed for op_andi). */
    uint64_t data;
};
   1178
   1179/* ====================================================================== */
   1180/* Miscellaneous helpers, used by several operations.  */
   1181
   1182static void help_l2_shift(DisasContext *s, DisasOps *o, int mask)
   1183{
   1184    int b2 = get_field(s, b2);
   1185    int d2 = get_field(s, d2);
   1186
   1187    if (b2 == 0) {
   1188        o->in2 = tcg_const_i64(d2 & mask);
   1189    } else {
   1190        o->in2 = get_address(s, 0, b2, d2);
   1191        tcg_gen_andi_i64(o->in2, o->in2, mask);
   1192    }
   1193}
   1194
   1195static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
   1196{
   1197    if (dest == s->pc_tmp) {
   1198        per_branch(s, true);
   1199        return DISAS_NEXT;
   1200    }
   1201    if (use_goto_tb(s, dest)) {
   1202        update_cc_op(s);
   1203        per_breaking_event(s);
   1204        tcg_gen_goto_tb(0);
   1205        tcg_gen_movi_i64(psw_addr, dest);
   1206        tcg_gen_exit_tb(s->base.tb, 0);
   1207        return DISAS_GOTO_TB;
   1208    } else {
   1209        tcg_gen_movi_i64(psw_addr, dest);
   1210        per_branch(s, false);
   1211        return DISAS_PC_UPDATED;
   1212    }
   1213}
   1214
/*
 * Emit a conditional branch.  The comparison 'c' selects taken vs
 * fallthrough; the target is either pc_next + 2 * imm (is_imm) or the
 * runtime address in cdest.  'c' is consumed via free_compare() on
 * every exit path.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison result to feed the movcond. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
   1345
   1346/* ====================================================================== */
   1347/* The operations.  These perform the bulk of the work for any insn,
   1348   usually after the operands have been loaded and output initialized.  */
   1349
/* out = |in2| (64-bit integer absolute value). */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}
   1355
/* Float abs, short format: clear bit 31 (the sign bit) of in2. */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}
   1361
/* Float abs, long format: clear bit 63 (the sign bit) of in2. */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}
   1367
/* Float abs, extended format: clear the sign bit in the high half
   (in1) and copy the low half (in2) unchanged. */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
   1374
/* out = in1 + in2 (CC, if any, is produced by the cout helper). */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
   1380
/* Unsigned 64-bit add: out = in1 + in2, leaving the carry-out (0/1)
   in cc_src for the ADDU condition-code computation. */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
   1387
/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* cc_src holds the borrow (presumably 0,-1); +1 maps it to the
           carry 1,0.  NOTE(review): confirm the SUBU borrow encoding. */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        /* Materialize the CC first, then fall through to extract it. */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
   1408
/* 32-bit add with carry: out = in1 + in2 + carry (from cc_src). */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}
   1416
/* 64-bit add with carry: out = in1 + carry + in2, accumulating the
   final carry-out into cc_src across both additions. */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}
   1428
/* Add to storage: load from addr1, add in2, store back.  Done with an
   atomic read-modify-write when the STFLE_45 facility is available. */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
   1450
/* Unsigned add to storage: like op_asi, but the recomputation also
   leaves the carry-out in cc_src for the ADDU CC. */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
   1473
/* BFP add, short format, delegated to the aeb helper. */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
   1479
/* BFP add, long format, delegated to the adb helper. */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
   1485
/* BFP add, extended (128-bit) format; the helper returns the high half
   directly and the low half via return_low128. */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
   1492
/* out = in1 & in2. */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
   1498
/* AND an immediate into one slice of in1.  insn->data packs the slice:
   low byte = bit shift, high byte = width in bits.  Bits outside the
   slice are preserved by OR-ing ~mask into the shifted immediate. */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place below, so it must not be a global. */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
   1515
/* AND to storage: load from addr1, AND with in2, store back.  Atomic
   read-modify-write when the interlocked-access-2 facility exists. */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
   1536
   1537static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
   1538{
   1539    pc_to_link_info(o->out, s, s->pc_tmp);
   1540    if (o->in2) {
   1541        tcg_gen_mov_i64(psw_addr, o->in2);
   1542        per_branch(s, false);
   1543        return DISAS_PC_UPDATED;
   1544    } else {
   1545        return DISAS_NEXT;
   1546    }
   1547}
   1548
/* Store the BAL-style link information into o->out. */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    /* In 31/64-bit addressing modes, the link is just the next PC. */
    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    /* Otherwise (24-bit mode), assemble the low word from: insn length
       in halfwords at bits 30-31, return address, psw_mask bits moved
       to bits 24-27, and the CC at bits 28-29. */
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
   1569
   1570static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
   1571{
   1572    save_link_info(s, o);
   1573    if (o->in2) {
   1574        tcg_gen_mov_i64(psw_addr, o->in2);
   1575        per_branch(s, false);
   1576        return DISAS_PC_UPDATED;
   1577    } else {
   1578        return DISAS_NEXT;
   1579    }
   1580}
   1581
/* Branch-and-save with an immediate: link, then branch relative
   by 2 * i2 halfwords. */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2));
}
   1587
   1588static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
   1589{
   1590    int m1 = get_field(s, m1);
   1591    bool is_imm = have_field(s, i2);
   1592    int imm = is_imm ? get_field(s, i2) : 0;
   1593    DisasCompare c;
   1594
   1595    /* BCR with R2 = 0 causes no branching */
   1596    if (have_field(s, r2) && get_field(s, r2) == 0) {
   1597        if (m1 == 14) {
   1598            /* Perform serialization */
   1599            /* FIXME: check for fast-BCR-serialization facility */
   1600            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
   1601        }
   1602        if (m1 == 15) {
   1603            /* Perform serialization */
   1604            /* FIXME: perform checkpoint-synchronisation */
   1605            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
   1606        }
   1607        return DISAS_NEXT;
   1608    }
   1609
   1610    disas_jcc(s, &c, m1);
   1611    return help_branch(s, &c, is_imm, imm, o->in2);
   1612}
   1613
/* Branch on count, 32-bit: decrement the low word of r1 and branch
   when the decremented value is nonzero. */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
   1637
/* Branch on count, high word: decrement the high word of r1 and
   branch when the decremented value is nonzero.  Always immediate. */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
   1661
   1662static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
   1663{
   1664    int r1 = get_field(s, r1);
   1665    bool is_imm = have_field(s, i2);
   1666    int imm = is_imm ? get_field(s, i2) : 0;
   1667    DisasCompare c;
   1668
   1669    c.cond = TCG_COND_NE;
   1670    c.is_64 = true;
   1671    c.g1 = true;
   1672    c.g2 = false;
   1673
   1674    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
   1675    c.u.s64.a = regs[r1];
   1676    c.u.s64.b = tcg_const_i64(0);
   1677
   1678    return help_branch(s, &c, is_imm, imm, o->in2);
   1679}
   1680
/* Branch on index, 32-bit: r1 += r3, branch on comparing the new low
   word of r1 against the low word of the odd register r3|1.
   insn->data selects LE vs GT as the branch condition. */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
   1706
/* Branch on index, 64-bit: r1 += r3, branch on comparing r1 against
   the odd register r3|1.  insn->data selects LE vs GT. */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If r1 is the comparand register itself, snapshot its value
       before the addition below clobbers it. */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
   1732
/* Compare and branch: compare in1 with in2 under mask m3 (unsigned
   comparison when insn->data is set), branching to the immediate i4
   or to the address b4+d4. */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
   1758
/* BFP compare, short format; the helper computes the CC, which is
   then latched via set_cc_static. */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   1765
/* BFP compare, long format; CC computed by the helper. */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   1772
/* BFP compare, extended format (128-bit operand pairs); CC computed
   by the helper. */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   1779
   1780static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
   1781                                   bool m4_with_fpe)
   1782{
   1783    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
   1784    uint8_t m3 = get_field(s, m3);
   1785    uint8_t m4 = get_field(s, m4);
   1786
   1787    /* m3 field was introduced with FPE */
   1788    if (!fpe && m3_with_fpe) {
   1789        m3 = 0;
   1790    }
   1791    /* m4 field was introduced with FPE */
   1792    if (!fpe && m4_with_fpe) {
   1793        m4 = 0;
   1794    }
   1795
   1796    /* Check for valid rounding modes. Mode 3 was introduced later. */
   1797    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
   1798        gen_program_exception(s, PGM_SPECIFICATION);
   1799        return NULL;
   1800    }
   1801
   1802    return tcg_const_i32(deposit32(m3, 4, 4, m4));
   1803}
   1804
   1805static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
   1806{
   1807    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
   1808
   1809    if (!m34) {
   1810        return DISAS_NORETURN;
   1811    }
   1812    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
   1813    tcg_temp_free_i32(m34);
   1814    set_cc_static(s);
   1815    return DISAS_NEXT;
   1816}
   1817
   1818static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
   1819{
   1820    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
   1821
   1822    if (!m34) {
   1823        return DISAS_NORETURN;
   1824    }
   1825    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
   1826    tcg_temp_free_i32(m34);
   1827    set_cc_static(s);
   1828    return DISAS_NEXT;
   1829}
   1830
/* CFXB: CONVERT TO FIXED, extended BFP source (in1:in2 pair); sets the CC.  */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
   1843
/* CGEB: CONVERT TO FIXED (64-bit result), short BFP source; sets the CC.  */
static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
   1856
/* CGDB: CONVERT TO FIXED (64-bit result), long BFP source; sets the CC.  */
static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
   1869
/* CGXB: CONVERT TO FIXED (64-bit result), extended BFP source; sets the CC.  */
static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
   1882
/* CLFEB: CONVERT TO LOGICAL (32-bit), short BFP source; sets the CC.  */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
   1895
/* CLFDB: CONVERT TO LOGICAL (32-bit), long BFP source; sets the CC.  */
static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
   1908
/* CLFXB: CONVERT TO LOGICAL (32-bit), extended BFP source; sets the CC.  */
static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
   1921
/* CLGEB: CONVERT TO LOGICAL (64-bit), short BFP source; sets the CC.  */
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
   1934
/* CLGDB: CONVERT TO LOGICAL (64-bit), long BFP source; sets the CC.  */
static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
   1947
/* CLGXB: CONVERT TO LOGICAL (64-bit), extended BFP source; sets the CC.  */
static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
   1960
/* CEGB: CONVERT FROM FIXED (64-bit source) to short BFP.  No CC change.  */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
   1972
/* CDGB: CONVERT FROM FIXED (64-bit source) to long BFP.  No CC change.  */
static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
   1984
/* CXGB: CONVERT FROM FIXED (64-bit source) to extended BFP (128-bit result,
   high part in out, low part returned via return_low128).  */
static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
   1997
/* CELGB: CONVERT FROM LOGICAL (64-bit source) to short BFP.  No CC change.  */
static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
   2009
/* CDLGB: CONVERT FROM LOGICAL (64-bit source) to long BFP.  No CC change.  */
static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
   2021
/* CXLGB: CONVERT FROM LOGICAL (64-bit source) to extended BFP (128-bit
   result, low part returned via return_low128).  */
static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
   2034
/* CKSM: CHECKSUM.  The helper returns the number of bytes processed; the
   address register r2 is advanced and the length register r2+1 reduced by
   that amount after the call.  Sets the CC.  */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    /* Advance the address and decrement the remaining length.  */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}
   2050
/* CLC: COMPARE LOGICAL (character).  Lengths of 1/2/4/8 bytes are inlined
   as two loads plus an unsigned-compare CC op; other lengths go through the
   byte-wise helper.  */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i32 vl;

    /* The l1 field encodes length minus one.  */
    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* General case: helper computes the CC directly.  */
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
   2083
   2084static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
   2085{
   2086    int r1 = get_field(s, r1);
   2087    int r2 = get_field(s, r2);
   2088    TCGv_i32 t1, t2;
   2089
   2090    /* r1 and r2 must be even.  */
   2091    if (r1 & 1 || r2 & 1) {
   2092        gen_program_exception(s, PGM_SPECIFICATION);
   2093        return DISAS_NORETURN;
   2094    }
   2095
   2096    t1 = tcg_const_i32(r1);
   2097    t2 = tcg_const_i32(r2);
   2098    gen_helper_clcl(cc_op, cpu_env, t1, t2);
   2099    tcg_temp_free_i32(t1);
   2100    tcg_temp_free_i32(t2);
   2101    set_cc_static(s);
   2102    return DISAS_NEXT;
   2103}
   2104
   2105static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
   2106{
   2107    int r1 = get_field(s, r1);
   2108    int r3 = get_field(s, r3);
   2109    TCGv_i32 t1, t3;
   2110
   2111    /* r1 and r3 must be even.  */
   2112    if (r1 & 1 || r3 & 1) {
   2113        gen_program_exception(s, PGM_SPECIFICATION);
   2114        return DISAS_NORETURN;
   2115    }
   2116
   2117    t1 = tcg_const_i32(r1);
   2118    t3 = tcg_const_i32(r3);
   2119    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
   2120    tcg_temp_free_i32(t1);
   2121    tcg_temp_free_i32(t3);
   2122    set_cc_static(s);
   2123    return DISAS_NEXT;
   2124}
   2125
   2126static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
   2127{
   2128    int r1 = get_field(s, r1);
   2129    int r3 = get_field(s, r3);
   2130    TCGv_i32 t1, t3;
   2131
   2132    /* r1 and r3 must be even.  */
   2133    if (r1 & 1 || r3 & 1) {
   2134        gen_program_exception(s, PGM_SPECIFICATION);
   2135        return DISAS_NORETURN;
   2136    }
   2137
   2138    t1 = tcg_const_i32(r1);
   2139    t3 = tcg_const_i32(r3);
   2140    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
   2141    tcg_temp_free_i32(t1);
   2142    tcg_temp_free_i32(t3);
   2143    set_cc_static(s);
   2144    return DISAS_NEXT;
   2145}
   2146
/* CLM: COMPARE LOGICAL CHARACTERS UNDER MASK.  The low 32 bits of in1 are
   compared, byte-wise under mask m3, against storage; the helper produces
   the CC.  */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}
   2158
/* CLST: COMPARE LOGICAL STRING.  regs[0] holds the terminating character;
   the helper returns the updated addresses (second one via the low-128
   mechanism) and the CC.  */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}
   2166
/* COPY SIGN: out = sign bit of in1 combined with magnitude of in2.  */
static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    /* Isolate the sign bit of in1 and the non-sign bits of in2.  */
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
   2176
/* CS/CSG: COMPARE AND SWAP, implemented with an atomic cmpxchg.  The CC is
   0 on success (values equal) and 1 on failure.  */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
   2201
/* CDSG: COMPARE DOUBLE AND SWAP (128-bit).  In non-parallel translation the
   plain helper suffices; otherwise a true 128-bit cmpxchg is needed, and if
   the host lacks one we exit to the slow atomic path.  */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        /* No host 128-bit cmpxchg: restart this insn under exclusive.  */
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}
   2231
   2232static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
   2233{
   2234    int r3 = get_field(s, r3);
   2235    TCGv_i32 t_r3 = tcg_const_i32(r3);
   2236
   2237    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
   2238        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
   2239    } else {
   2240        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
   2241    }
   2242    tcg_temp_free_i32(t_r3);
   2243
   2244    set_cc_static(s);
   2245    return DISAS_NEXT;
   2246}
   2247
   2248#ifndef CONFIG_USER_ONLY
/* CSP/CSPG: COMPARE AND SWAP AND PURGE.  Performs a cmpxchg at the address
   in in2 (low bits masked off per operand size), then, when the compare
   succeeded and bit 63 of R2 is set, purges the TLB on all CPUs.  */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Mask off the low address bits according to the operand size.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
   2291#endif
   2292
   2293static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
   2294{
   2295    TCGv_i64 t1 = tcg_temp_new_i64();
   2296    TCGv_i32 t2 = tcg_temp_new_i32();
   2297    tcg_gen_extrl_i64_i32(t2, o->in1);
   2298    gen_helper_cvd(t1, t2);
   2299    tcg_temp_free_i32(t2);
   2300    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
   2301    tcg_temp_free_i64(t1);
   2302    return DISAS_NEXT;
   2303}
   2304
/* COMPARE AND TRAP.  Branch around the trap when the inverted condition
   holds; s->insn->data selects the unsigned (logical) variant.  */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    /* Invert so that the branch skips the trap when no trap is wanted.  */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
   2323
/* CONVERT UTF: CU12/CU14/CU21/CU24/CU41/CU42, selected by s->insn->data
   (source/destination UTF widths).  m3 is the ETF3 well-formedness-check
   flag and is ignored without that facility.  */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}
   2373
   2374#ifndef CONFIG_USER_ONLY
   2375static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
   2376{
   2377    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
   2378    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
   2379    TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));
   2380
   2381    gen_helper_diag(cpu_env, r1, r3, func_code);
   2382
   2383    tcg_temp_free_i32(func_code);
   2384    tcg_temp_free_i32(r3);
   2385    tcg_temp_free_i32(r1);
   2386    return DISAS_NEXT;
   2387}
   2388#endif
   2389
/* 32-bit signed divide; remainder returned via the low-128 mechanism.  */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
   2396
/* 32-bit unsigned divide; remainder returned via the low-128 mechanism.  */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
   2403
/* 64-bit signed divide; remainder returned via the low-128 mechanism.  */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
   2410
/* 128-by-64-bit unsigned divide: dividend is the out:out2 pair.  The
   remainder is returned via the low-128 mechanism.  */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
   2417
/* DEB: DIVIDE, short BFP.  */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
   2423
/* DDB: DIVIDE, long BFP.  */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
   2429
/* DXB: DIVIDE, extended BFP (128-bit operands; low half of the result is
   returned via the low-128 mechanism).  */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
   2436
/* EAR: EXTRACT ACCESS REGISTER r2 into the output register.  */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}
   2443
/* ECAG: EXTRACT CPU ATTRIBUTE.  Not modeled; report "no information".  */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}
   2450
/* EFPC: EXTRACT FPC into the output register.  */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
   2456
/* EPSW: EXTRACT PSW.  Stores the high PSW word into r1 and, when r2 is
   non-zero, the low word into r2.  */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
   2474
/* EX/EXRL: EXECUTE.  The helper fetches and stashes the target insn
   (modified by the low byte of R1 unless r1 is 0); translation then
   restarts at the stashed insn, hence DISAS_PC_CC_UPDATED.  */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    /* r1 == 0 means the target insn is executed unmodified.  */
    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;
}
   2506
/* FIEB: LOAD FP INTEGER, short BFP (round to integer value).  */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
   2518
/* FIDB: LOAD FP INTEGER, long BFP (round to integer value).  */
static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
   2530
/* FIXB: LOAD FP INTEGER, extended BFP (128-bit; low half returned via the
   low-128 mechanism).  */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    /* NULL means an invalid rounding mode already raised an exception.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
    return_low128(o->out2);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
   2543
/* FLOGR: FIND LEFTMOST ONE.  R1 gets the leading-zero count (64 when the
   input is zero), R1+1 the input with the found bit cleared.  */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
   2563
   2564static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
   2565{
   2566    int m3 = get_field(s, m3);
   2567    int pos, len, base = s->insn->data;
   2568    TCGv_i64 tmp = tcg_temp_new_i64();
   2569    uint64_t ccm;
   2570
   2571    switch (m3) {
   2572    case 0xf:
   2573        /* Effectively a 32-bit load.  */
   2574        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
   2575        len = 32;
   2576        goto one_insert;
   2577
   2578    case 0xc:
   2579    case 0x6:
   2580    case 0x3:
   2581        /* Effectively a 16-bit load.  */
   2582        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
   2583        len = 16;
   2584        goto one_insert;
   2585
   2586    case 0x8:
   2587    case 0x4:
   2588    case 0x2:
   2589    case 0x1:
   2590        /* Effectively an 8-bit load.  */
   2591        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
   2592        len = 8;
   2593        goto one_insert;
   2594
   2595    one_insert:
   2596        pos = base + ctz32(m3) * 8;
   2597        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
   2598        ccm = ((1ull << len) - 1) << pos;
   2599        break;
   2600
   2601    default:
   2602        /* This is going to be a sequence of loads and inserts.  */
   2603        pos = base + 32 - 8;
   2604        ccm = 0;
   2605        while (m3) {
   2606            if (m3 & 0x8) {
   2607                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
   2608                tcg_gen_addi_i64(o->in2, o->in2, 1);
   2609                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
   2610                ccm |= 0xff << pos;
   2611            }
   2612            m3 = (m3 << 1) & 0xf;
   2613            pos -= 8;
   2614        }
   2615        break;
   2616    }
   2617
   2618    tcg_gen_movi_i64(tmp, ccm);
   2619    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
   2620    tcg_temp_free_i64(tmp);
   2621    return DISAS_NEXT;
   2622}
   2623
/* Insert immediate: deposit in2 into in1 at the bit position and width
   packed into s->insn->data (low byte = shift, high bits = size).  */
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}
   2631
/* IPM: INSERT PROGRAM MASK.  Builds CC (bits 2-3) and program mask
   (bits 4-7 of psw_mask) into bits 24-31 of the output register.  */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    /* Materialize the CC before reading it.  */
    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    /* Program mask lives in bits 40-43 of psw_mask.  */
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
   2647
   2648#ifndef CONFIG_USER_ONLY
/* IDTE: INVALIDATE DAT TABLE ENTRY.  m4 is honoured only with the
   local-TLB-clearing facility; otherwise it is passed as 0.  */
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}
   2662
/* IPTE: INVALIDATE PAGE TABLE ENTRY.  m4 is honoured only with the
   local-TLB-clearing facility; otherwise it is passed as 0.  */
static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}
   2676
/* ISKE: INSERT STORAGE KEY EXTENDED.  */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
   2682#endif
   2683
/* Message-security-assist instructions (KM, KMC, KMAC, KIMD, ...).  The
   function type is in s->insn->data.  The cascaded fall-through switch
   enforces the register constraints that apply to each type: each case adds
   its own check and falls into the checks of the weaker types below it.  */
static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
    int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
    int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMA:
        if (r3 == r1 || r3 == r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMCTR:
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    };

    t_r1 = tcg_const_i32(r1);
    t_r2 = tcg_const_i32(r2);
    t_r3 = tcg_const_i32(r3);
    type = tcg_const_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r2);
    tcg_temp_free_i32(t_r3);
    tcg_temp_free_i32(type);
    return DISAS_NEXT;
}
   2741
/* KEB: COMPARE AND SIGNAL, short BFP; sets the CC.  */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   2748
/* KDB: COMPARE AND SIGNAL, long BFP; sets the CC.  */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   2755
/* KXB: COMPARE AND SIGNAL, extended BFP (128-bit operands); sets the CC.  */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   2762
/* LAA/LAAG: LOAD AND ADD.  */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The instruction's real output is the original value in memory,
       which the atomic fetch-add leaves in in2.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
   2773
/* LAN/LANG: LOAD AND AND.  */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The instruction's real output is the original value in memory,
       which the atomic fetch-and leaves in in2.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the AND for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
   2784
/* LAO/LAOG: LOAD AND OR.  */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The instruction's real output is the original value in memory,
       which the atomic fetch-or leaves in in2.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the OR for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
   2795
/* LAX/LAXG: LOAD AND EXCLUSIVE OR.  */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The instruction's real output is the original value in memory,
       which the atomic fetch-xor leaves in in2.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the XOR for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
   2806
/* LDEB: delegate BFP lengthening to the helper; no rounding-mode field
   is involved, so no m34 extraction is needed here.  */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
   2812
/* LEDB: BFP rounding via helper with an explicit m3/m4 modifier.  */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    /* NULL means the m3/m4 combination was invalid; the extractor has
       presumably already generated the exception — hence NORETURN.  */
    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
   2824
/* LDXB: round the 128-bit pair (in1:in2) via helper with m3/m4 modifier.  */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    /* Invalid modifier field: exception already generated by extractor.  */
    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
   2836
/* LEXB: round the 128-bit pair (in1:in2) via helper with m3/m4 modifier.  */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    /* Invalid modifier field: exception already generated by extractor.  */
    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
   2848
/* LXDB: helper returns a 128-bit result; high half in o->out, low half
   fetched from the helper's side channel via return_low128.  */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
   2855
/* LXEB: as op_lxdb, but lengthening from a 32-bit source.  */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
   2862
/* Place a 32-bit value in the high half of the 64-bit FP register
   image (short BFP values live in bits 0-31 of the register).  */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}
   2868
/* LLGT: load with the 31-bit mask applied (clears bit 32 and above,
   plus the top bit of the low word).  */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
   2874
/* Sign-extending 8-bit load from the address in in2.  */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
   2880
/* Zero-extending 8-bit load from the address in in2.  */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
   2886
/* Sign-extending 16-bit load from the address in in2.  */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
   2892
/* Zero-extending 16-bit load from the address in in2.  */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
   2898
/* Sign-extending 32-bit load from the address in in2.  */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
   2904
/* Zero-extending 32-bit load from the address in in2.  */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
   2910
/* 64-bit load from the address in in2.  */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
   2916
/* LOAD AND TRAP (32-bit): store the loaded word into r1, then trap if
   it was zero.  */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
   2927
/* LOAD AND TRAP (64-bit): load the doubleword, then trap if zero.  */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
   2938
/* LOAD HIGH AND TRAP: write the value into the high half of r1, then
   trap if it was zero.  */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
   2949
/* LOAD LOGICAL AND TRAP: zero-extended 32-bit load, trap if zero.  */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
   2960
/* LOAD LOGICAL THIRTY ONE BITS AND TRAP: apply the 31-bit mask, trap
   if the result is zero.  */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
   2971
/* LOAD ON CONDITION: out = condition(m3) ? in2 : in1.
   disas_jcc turns the m3 mask plus current CC state into a comparison
   that may be either 32- or 64-bit wide.  */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        /* Materialize the 32-bit comparison result as 0/1 ...  */
        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        /* ... widen it to 64 bits ...  */
        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        /* ... and select with a 64-bit movcond against zero.  */
        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return DISAS_NEXT;
}
   3001
   3002#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): load control registers r1..r3 via helper.
   Privileged; may change interrupt masks, hence the forced exit.  */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
   3013
/* LOAD CONTROL (64-bit): as op_lctl, but loading full 64-bit control
   registers.  */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
   3024
/* LOAD REAL ADDRESS: translation done in the helper, which also
   computes the condition code.  */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   3031
/* LOAD PROGRAM PARAMETER: store the operand into env->pp.  */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
   3037
/* LOAD PSW (short, 8-byte format): load the 32-bit mask and 32-bit
   address halves and hand them to the load_psw helper.  */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    /* A PSW swap is a PER breaking event.  */
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* The 8-byte operand must be doubleword aligned.  */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUL | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    /* load_psw replaces the PSW entirely; translation cannot continue.  */
    return DISAS_NORETURN;
}
   3057
/* LOAD PSW EXTENDED (16-byte format): load the 64-bit mask and 64-bit
   address and hand them to the load_psw helper.  */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    /* A PSW swap is a PER breaking event.  */
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* The 16-byte operand must be doubleword aligned.  */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    /* load_psw replaces the PSW entirely; translation cannot continue.  */
    return DISAS_NORETURN;
}
   3075#endif
   3076
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 via helper.  */
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
   3086
/* LOAD MULTIPLE (32-bit): load words into the low halves of registers
   r1..r3 (wrapping mod 16).  The first and last loads are performed
   up front so that any page fault arrives before registers have been
   partially modified.  */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        /* Advance the address by one word; t2 holds the stride.  */
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
   3132
/* LOAD MULTIPLE HIGH: same structure as op_lm32, but the loaded words
   go to the high halves of registers r1..r3.  */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        /* Advance the address by one word; t2 holds the stride.  */
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
   3178
/* LOAD MULTIPLE (64-bit): load doublewords into registers r1..r3
   (wrapping mod 16).  First and last loads are done up front so a
   page fault arrives before registers are partially modified; note the
   first value is staged in t1 since regs[r1] may be the base address.  */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        /* Advance the address by one doubleword; t1 holds the stride.  */
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
   3219
/* LOAD PAIR DISJOINT: two interlocked loads from independent addresses.
   We can only make the pair appear atomic by stopping other vCPUs, so
   under CF_PARALLEL we punt to the EXCP_ATOMIC slow path.  */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
   3245
/* LOAD PAIR FROM QUADWORD: 128-bit atomic load.  Serial contexts use
   the plain helper; parallel contexts need host 128-bit atomics, else
   we fall back to the stop-the-world EXCP_ATOMIC path.  */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    } else if (HAVE_ATOMIC128) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    /* Low 64 bits come back through the helper's side channel.  */
    return_low128(o->out2);
    return DISAS_NEXT;
}
   3259
   3260#ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS: load through the real-address MMU index,
   bypassing dynamic address translation; size comes from insn->data.  */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
   3266#endif
   3267
/* LOAD AND ZERO RIGHTMOST BYTE: clear bits 56-63 of the operand.  */
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
   3273
/* LOAD COUNT TO BLOCK BOUNDARY: out = min(16, bytes remaining until
   the next 2^(m3+6)-byte boundary after addr1).  */
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    /* m3 > 6 (block size > 4K) is a specification exception.  */
    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* addr | -block_size == (addr % block_size) - block_size, so the
       negation yields the distance to the next block boundary.  */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
   3290
/* MONITOR CALL: check the monitor class, then (system emulation only)
   let the helper decide whether to raise a monitor event.  In user
   mode this is a NOP after the specification check.  */
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
#if !defined(CONFIG_USER_ONLY)
    TCGv_i32 i2;
#endif
    const uint16_t monitor_class = get_field(s, i2);

    /* Only classes 0-255 are valid.  */
    if (monitor_class & 0xff00) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    i2 = tcg_const_i32(monitor_class);
    gen_helper_monitor_call(cpu_env, o->addr1, i2);
    tcg_temp_free_i32(i2);
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
   3311
/* Generic move: steal in2 as the output value, transferring ownership
   (the g_ flag tracks whether the TCGv is a global that must not be
   freed).  */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return DISAS_NEXT;
}
   3320
/* Move, additionally updating access register 1 according to the
   current address-space-control mode (used by MVCDK/MVCSK-style ops —
   NOTE(review): confirm against the insn table).  */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();

    /* Steal in2 as the output, transferring ownership.  */
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        /* In secondary mode, copy the base register's access register;
           b2 == 0 means no base register, hence AR 0.  */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return DISAS_NEXT;
}
   3355
/* Move a 128-bit pair: steal in1/in2 as out/out2, transferring
   ownership of both halves.  */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return DISAS_NEXT;
}
   3367
   3368static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
   3369{
   3370    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
   3371    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
   3372    tcg_temp_free_i32(l);
   3373    return DISAS_NEXT;
   3374}
   3375
   3376static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
   3377{
   3378    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
   3379    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
   3380    tcg_temp_free_i32(l);
   3381    return DISAS_NEXT;
   3382}
   3383
   3384static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
   3385{
   3386    int r1 = get_field(s, r1);
   3387    int r2 = get_field(s, r2);
   3388    TCGv_i32 t1, t2;
   3389
   3390    /* r1 and r2 must be even.  */
   3391    if (r1 & 1 || r2 & 1) {
   3392        gen_program_exception(s, PGM_SPECIFICATION);
   3393        return DISAS_NORETURN;
   3394    }
   3395
   3396    t1 = tcg_const_i32(r1);
   3397    t2 = tcg_const_i32(r2);
   3398    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
   3399    tcg_temp_free_i32(t1);
   3400    tcg_temp_free_i32(t2);
   3401    set_cc_static(s);
   3402    return DISAS_NEXT;
   3403}
   3404
   3405static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
   3406{
   3407    int r1 = get_field(s, r1);
   3408    int r3 = get_field(s, r3);
   3409    TCGv_i32 t1, t3;
   3410
   3411    /* r1 and r3 must be even.  */
   3412    if (r1 & 1 || r3 & 1) {
   3413        gen_program_exception(s, PGM_SPECIFICATION);
   3414        return DISAS_NORETURN;
   3415    }
   3416
   3417    t1 = tcg_const_i32(r1);
   3418    t3 = tcg_const_i32(r3);
   3419    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
   3420    tcg_temp_free_i32(t1);
   3421    tcg_temp_free_i32(t3);
   3422    set_cc_static(s);
   3423    return DISAS_NEXT;
   3424}
   3425
   3426static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
   3427{
   3428    int r1 = get_field(s, r1);
   3429    int r3 = get_field(s, r3);
   3430    TCGv_i32 t1, t3;
   3431
   3432    /* r1 and r3 must be even.  */
   3433    if (r1 & 1 || r3 & 1) {
   3434        gen_program_exception(s, PGM_SPECIFICATION);
   3435        return DISAS_NORETURN;
   3436    }
   3437
   3438    t1 = tcg_const_i32(r1);
   3439    t3 = tcg_const_i32(r3);
   3440    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
   3441    tcg_temp_free_i32(t1);
   3442    tcg_temp_free_i32(t3);
   3443    set_cc_static(s);
   3444    return DISAS_NEXT;
   3445}
   3446
/* MOVE WITH OPTIONAL SPECIFICATIONS: the length lives in regs[r3];
   the helper performs the move and computes the CC.  */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
   3454
   3455#ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: the register holding the true length is encoded in
   the l1 field position of this insn format, hence the get_field(s, l1)
   for a register number.  The helper sets the CC.  */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   3463
/* MOVE TO SECONDARY: as op_mvcp, the length register number sits in
   the l1 field position.  The helper sets the CC.  */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   3471#endif
   3472
   3473static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
   3474{
   3475    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
   3476    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
   3477    tcg_temp_free_i32(l);
   3478    return DISAS_NEXT;
   3479}
   3480
   3481static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
   3482{
   3483    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
   3484    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
   3485    tcg_temp_free_i32(l);
   3486    return DISAS_NEXT;
   3487}
   3488
   3489static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
   3490{
   3491    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
   3492    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
   3493
   3494    gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
   3495    tcg_temp_free_i32(t1);
   3496    tcg_temp_free_i32(t2);
   3497    set_cc_static(s);
   3498    return DISAS_NEXT;
   3499}
   3500
   3501static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
   3502{
   3503    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
   3504    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
   3505
   3506    gen_helper_mvst(cc_op, cpu_env, t1, t2);
   3507    tcg_temp_free_i32(t1);
   3508    tcg_temp_free_i32(t2);
   3509    set_cc_static(s);
   3510    return DISAS_NEXT;
   3511}
   3512
   3513static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
   3514{
   3515    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
   3516    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
   3517    tcg_temp_free_i32(l);
   3518    return DISAS_NEXT;
   3519}
   3520
/* 64-bit multiply: out = in1 * in2 (low 64 bits).  */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
   3526
/* Unsigned 64x64->128 multiply: high half in out, low half in out2.  */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
   3532
/* Signed 64x64->128 multiply: high half in out, low half in out2.  */
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
   3538
/* MEEB: 32-bit BFP multiply via helper.  */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
   3544
/* MDEB: BFP multiply producing a 64-bit result, via helper.  */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
   3550
/* MDB: 64-bit BFP multiply via helper.  */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
   3556
/* MXB: 128-bit BFP multiply of (out:out2) by (in1:in2); low half of
   the 128-bit result comes back via return_low128.  */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
   3563
/* MXDB: multiply (out:out2) by the 64-bit operand in2, producing a
   128-bit result; low half via return_low128.  */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
   3570
   3571static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
   3572{
   3573    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
   3574    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
   3575    tcg_temp_free_i64(r3);
   3576    return DISAS_NEXT;
   3577}
   3578
   3579static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
   3580{
   3581    TCGv_i64 r3 = load_freg(get_field(s, r3));
   3582    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
   3583    tcg_temp_free_i64(r3);
   3584    return DISAS_NEXT;
   3585}
   3586
   3587static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
   3588{
   3589    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
   3590    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
   3591    tcg_temp_free_i64(r3);
   3592    return DISAS_NEXT;
   3593}
   3594
   3595static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
   3596{
   3597    TCGv_i64 r3 = load_freg(get_field(s, r3));
   3598    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
   3599    tcg_temp_free_i64(r3);
   3600    return DISAS_NEXT;
   3601}
   3602
   3603static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
   3604{
   3605    TCGv_i64 z, n;
   3606    z = tcg_const_i64(0);
   3607    n = tcg_temp_new_i64();
   3608    tcg_gen_neg_i64(n, o->in2);
   3609    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
   3610    tcg_temp_free_i64(n);
   3611    tcg_temp_free_i64(z);
   3612    return DISAS_NEXT;
   3613}
   3614
/* Negative absolute of short BFP: force the sign bit (bit 32 of the
   register image, where short values live in the high word).  */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
   3620
/* Negative absolute of long BFP: force the sign bit.  */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
   3626
/* Negative absolute of extended BFP: force the sign bit in the high
   half, pass the low half through unchanged.  */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
   3633
   3634static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
   3635{
   3636    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
   3637    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
   3638    tcg_temp_free_i32(l);
   3639    set_cc_static(s);
   3640    return DISAS_NEXT;
   3641}
   3642
/* Two's-complement negation: out = -in2.  */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}
   3648
/* Negate short BFP: flip the sign bit (bit 32 of the register image).  */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
   3654
/* Negate long BFP: flip the sign bit.  */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
   3660
/* Negate extended BFP: flip the sign bit in the high half, pass the
   low half through unchanged.  */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
   3667
   3668static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
   3669{
   3670    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
   3671    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
   3672    tcg_temp_free_i32(l);
   3673    set_cc_static(s);
   3674    return DISAS_NEXT;
   3675}
   3676
/* Bitwise OR: out = in1 | in2.  */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
   3682
/* OR immediate into a bit-field: insn->data encodes the field's shift
   (low byte) and width (next byte).  The immediate in in2 is shifted
   into position before the OR.  */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 must be a local temp — we modify it in place.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
   3698
/* OR immediate to storage.  With the interlocked-access facility the
   OR happens atomically in memory; otherwise it is a plain
   load/modify/store sequence.  Either way the result is recomputed in
   o->out so the caller can derive the CC from it.  */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        /* Non-atomic path: load the old value ...  */
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        /* ... and store the result back on the non-atomic path.  */
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
   3719
   3720static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
   3721{
   3722    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
   3723    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
   3724    tcg_temp_free_i32(l);
   3725    return DISAS_NEXT;
   3726}
   3727
   3728static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
   3729{
   3730    int l2 = get_field(s, l2) + 1;
   3731    TCGv_i32 l;
   3732
   3733    /* The length must not exceed 32 bytes.  */
   3734    if (l2 > 32) {
   3735        gen_program_exception(s, PGM_SPECIFICATION);
   3736        return DISAS_NORETURN;
   3737    }
   3738    l = tcg_const_i32(l2);
   3739    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
   3740    tcg_temp_free_i32(l);
   3741    return DISAS_NEXT;
   3742}
   3743
   3744static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
   3745{
   3746    int l2 = get_field(s, l2) + 1;
   3747    TCGv_i32 l;
   3748
   3749    /* The length must be even and should not exceed 64 bytes.  */
   3750    if ((l2 & 1) || (l2 > 64)) {
   3751        gen_program_exception(s, PGM_SPECIFICATION);
   3752        return DISAS_NORETURN;
   3753    }
   3754    l = tcg_const_i32(l2);
   3755    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
   3756    tcg_temp_free_i32(l);
   3757    return DISAS_NEXT;
   3758}
   3759
/* POPCNT: delegate the population count of in2 to the popcnt helper. */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return DISAS_NEXT;
}
   3765
   3766#ifndef CONFIG_USER_ONLY
/* PTLB: delegate to the ptlb helper (system mode only). */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
   3772#endif
   3773
/*
 * RISBG family (rotate then insert selected bits): rotate R2 left by I5
 * and insert the I3..I4 bit range into R1, optionally zeroing the
 * remaining bits (I4 bit 0x80).  The variant — risbg/risbgn/risbhg/risblg
 * — is selected via s->fields.op2; the high/low forms restrict both the
 * insert range and the preserved bits to one 32-bit half.
 */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        /* pos < 0 marks "deposit not possible" for the code below. */
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask both sides and merge. */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
   3861
   3862static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
   3863{
   3864    int i3 = get_field(s, i3);
   3865    int i4 = get_field(s, i4);
   3866    int i5 = get_field(s, i5);
   3867    uint64_t mask;
   3868
   3869    /* If this is a test-only form, arrange to discard the result.  */
   3870    if (i3 & 0x80) {
   3871        o->out = tcg_temp_new_i64();
   3872        o->g_out = false;
   3873    }
   3874
   3875    i3 &= 63;
   3876    i4 &= 63;
   3877    i5 &= 63;
   3878
   3879    /* MASK is the set of bits to be operated on from R2.
   3880       Take care for I3/I4 wraparound.  */
   3881    mask = ~0ull >> i3;
   3882    if (i3 <= i4) {
   3883        mask ^= ~0ull >> i4 >> 1;
   3884    } else {
   3885        mask |= ~(~0ull >> i4 >> 1);
   3886    }
   3887
   3888    /* Rotate the input as necessary.  */
   3889    tcg_gen_rotli_i64(o->in2, o->in2, i5);
   3890
   3891    /* Operate.  */
   3892    switch (s->fields.op2) {
   3893    case 0x54: /* AND */
   3894        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
   3895        tcg_gen_and_i64(o->out, o->out, o->in2);
   3896        break;
   3897    case 0x56: /* OR */
   3898        tcg_gen_andi_i64(o->in2, o->in2, mask);
   3899        tcg_gen_or_i64(o->out, o->out, o->in2);
   3900        break;
   3901    case 0x57: /* XOR */
   3902        tcg_gen_andi_i64(o->in2, o->in2, mask);
   3903        tcg_gen_xor_i64(o->out, o->out, o->in2);
   3904        break;
   3905    default:
   3906        abort();
   3907    }
   3908
   3909    /* Set the CC.  */
   3910    tcg_gen_andi_i64(cc_dst, o->out, mask);
   3911    set_cc_nz_u64(s, cc_dst);
   3912    return DISAS_NEXT;
   3913}
   3914
/* Byte-swap the low 16 bits of in2; input and output are zero-extended. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
   3920
/* Byte-swap the low 32 bits of in2; input and output are zero-extended. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
   3926
/* Byte-swap all 64 bits of in2. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
   3932
   3933static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
   3934{
   3935    TCGv_i32 t1 = tcg_temp_new_i32();
   3936    TCGv_i32 t2 = tcg_temp_new_i32();
   3937    TCGv_i32 to = tcg_temp_new_i32();
   3938    tcg_gen_extrl_i64_i32(t1, o->in1);
   3939    tcg_gen_extrl_i64_i32(t2, o->in2);
   3940    tcg_gen_rotl_i32(to, t1, t2);
   3941    tcg_gen_extu_i32_i64(o->out, to);
   3942    tcg_temp_free_i32(t1);
   3943    tcg_temp_free_i32(t2);
   3944    tcg_temp_free_i32(to);
   3945    return DISAS_NEXT;
   3946}
   3947
/* 64-bit rotate left of in1 by in2. */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
   3953
   3954#ifndef CONFIG_USER_ONLY
/* RRBE: delegate to the rrbe helper; the helper supplies the CC. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   3961
/* SACF: delegate to the sacf helper and end the TB. */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_PC_STALE;
}
   3968#endif
   3969
/*
 * SET ADDRESSING MODE: s->insn->data selects the mode (0/1/other map to
 * 24-bit, 31-bit, and full-width address masks below).  The mode bits are
 * deposited into psw_mask and the TB is exited since execution mode may
 * have changed.
 */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    /* Write the new mode into the two PSW addressing-mode bits. */
    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_PC_STALE;
}
   4004
/* SAR: store the low 32 bits of in2 into access register r1. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
   4011
/* Short (32-bit) BFP subtract, delegated to the seb helper. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
   4017
/* Long (64-bit) BFP subtract, delegated to the sdb helper. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
   4023
/* Extended (128-bit) BFP subtract; the helper returns the result as a
   pair, with the low half recovered via return_low128. */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
   4030
/* Short BFP square root, delegated to the sqeb helper. */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
   4036
/* Long BFP square root, delegated to the sqdb helper. */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
   4042
/* Extended BFP square root; low half of the 128-bit result comes back
   via return_low128. */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
   4049
   4050#ifndef CONFIG_USER_ONLY
/* Service call: delegate to the servc helper; CC comes from the helper. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4057
/* SIGP: pass r1/r3 field numbers and the in2 operand to the sigp helper;
   CC comes from the helper. */
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
   4068#endif
   4069
/*
 * STOC / STOCG / STOCFH (store on condition): branch around the store
 * when the M3 condition is NOT fulfilled.  s->insn->data selects the
 * store width: 0 = 32-bit low word, 1 = 64-bit, 2 = high word.
 */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        /* Bring the high word down so the 32-bit store writes it. */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}
   4114
/*
 * Shift left arithmetic (32- or 64-bit, per s->insn->data = sign-bit
 * index).  The CC is computed from the pre-shift operands; the sign bit
 * of the result is taken unchanged from the source.
 */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
   4128
/* Logical shift left of in1 by in2. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
   4134
/* Arithmetic (sign-propagating) shift right of in1 by in2. */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
   4140
/* Logical (zero-filling) shift right of in1 by in2. */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
   4146
/* SFPC: set the FP control register via the sfpc helper. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}
   4152
/* SFAS: delegate to the sfas helper. */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}
   4158
/* SRNM: set the BFP rounding mode from the low bits of the address. */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
   4166
/* SRNMB: like op_srnm but with a wider mode field; shares the srnm helper. */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
   4174
/* SRNMT: deposit the 3-bit DFP rounding mode directly into env->fpc
   (bits 4..6); no helper needed since DFP is not implemented. */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
   4190
/* SPM: set the CC from bits 28-29 of in1 and the program mask from
   bits 24-27, deposited into psw_mask. */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
   4201
/*
 * ECTG: compute both operand addresses and the R3 address first, then
 * load the third operand into R3, store (operand1 - CPU timer) into GR0
 * and the second operand address into GR1.  All operands are fetched
 * before anything is modified so a fault restarts cleanly.
 */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
   4232
   4233#ifndef CONFIG_USER_ONLY
/* SPKA: deposit bits 4-7 of in2 into the PSW key field. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}
   4240
/* SSKE: delegate to the sske helper. */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
   4246
/* SSM: replace the top 8 bits (system mask) of psw_mask with in2. */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
   4253
/* STAP: read the CPU's core_id from env into out. */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
   4259#endif
   4260
/* STCK: read the TOD clock via the stck helper; CC forced to 0 since
   clock states are not modelled. */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
   4268
/*
 * STCKE: store the extended 16-byte TOD clock value.  The 64-bit clock
 * from the stck helper is placed into a zero-extended 104-bit layout,
 * with the TOD programmable register in the low word.
 */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16-bit value stored in a uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
   4295
   4296#ifndef CONFIG_USER_ONLY
/* SCK: load the aligned 64-bit operand and set the clock via the sck
   helper; CC comes from the helper. */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    gen_helper_sck(cc_op, cpu_env, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4304
/* SCKC: delegate to the sckc helper. */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}
   4310
/* SCKPF: delegate to the sckpf helper with the implicit GR0 operand. */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}
   4316
/* STCKC: read the clock comparator via the stckc helper. */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}
   4322
/* STCTG: store control registers r1..r3 via the stctg helper. */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
   4332
/* STCTL: store control registers r1..r3 via the stctl helper. */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
   4342
/* STIDP: read env->cpuid into out. */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}
   4348
/* SPT: set the CPU timer via the spt helper. */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}
   4354
/* STFL: delegate to the stfl helper. */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}
   4360
/* STPT: read the CPU timer via the stpt helper. */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}
   4366
/* STSI: delegate to the stsi helper with implicit GR0/GR1 operands;
   CC comes from the helper. */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4373
/* SPX: set the prefix register via the spx helper. */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}
   4379
/* Channel subsystem: delegate to the xsch helper (GR1 is the implicit
   operand); CC comes from the helper. */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4386
/* Channel subsystem: delegate to the csch helper; CC from the helper. */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4393
/* Channel subsystem: delegate to the hsch helper; CC from the helper. */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4400
/* Channel subsystem: delegate to the msch helper; CC from the helper. */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4407
/* Channel subsystem: delegate to the rchp helper; CC from the helper. */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4414
/* Channel subsystem: delegate to the rsch helper; CC from the helper. */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4421
/* SAL: delegate to the sal helper with the implicit GR1 operand. */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}
   4427
/* SCHM: delegate to the schm helper with implicit GR1/GR2 operands. */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}
   4433
/* SIGA is not implemented; report "subchannel not operational". */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}
   4440
/* STCPS is not provided; treat as a no-op. */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}
   4446
/* Channel subsystem: delegate to the ssch helper; CC from the helper. */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4453
/* Channel subsystem: delegate to the stsch helper; CC from the helper. */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4460
/* Channel subsystem: delegate to the stcrw helper; CC from the helper. */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4467
/* Channel subsystem: delegate to the tpi helper; CC from the helper. */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4474
/* Channel subsystem: delegate to the tsch helper; CC from the helper. */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4481
/* Channel subsystem: delegate to the chsc helper; CC from the helper. */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4488
/* STPX: read env->psa into out, masked to the architected prefix bits. */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
   4495
/*
 * STNSM/STOSM (distinguished by s->fields.op): store the current system
 * mask to memory first, then AND (0xac) or OR the immediate into the
 * high byte of psw_mask.  Ends the TB so pending interrupts are
 * re-evaluated.
 */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields.op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
   4519
/* STURA/STURG: store in1 to the real address in2 (MMU_REAL_IDX), with a
   PER store-real event when PER tracing is enabled. */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        update_psw_addr(s);
        gen_helper_per_store_real(cpu_env);
    }
    return DISAS_NEXT;
}
   4530#endif
   4531
/* STFLE: delegate to the stfle helper; CC comes from the helper. */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4538
/* Store the low byte of in1 at address in2. */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
   4544
/* Store the low 16 bits of in1 at address in2. */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
   4550
/* Store the low 32 bits of in1 at address in2. */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
   4556
/* Store all 64 bits of in1 at address in2. */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
   4562
/* STAM: store access registers r1..r3 via the stam helper. */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
   4572
/*
 * STCM (store characters under mask): store the bytes of in1 selected by
 * the M3 mask at in2.  Contiguous masks become a single wider store;
 * arbitrary masks fall back to per-byte shift-and-store.  s->insn->data
 * is the base bit position of the source field within the register.
 */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the least significant selected byte. */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
   4621
   4622static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
   4623{
   4624    int r1 = get_field(s, r1);
   4625    int r3 = get_field(s, r3);
   4626    int size = s->insn->data;
   4627    TCGv_i64 tsize = tcg_const_i64(size);
   4628
   4629    while (1) {
   4630        if (size == 8) {
   4631            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
   4632        } else {
   4633            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
   4634        }
   4635        if (r1 == r3) {
   4636            break;
   4637        }
   4638        tcg_gen_add_i64(o->in2, o->in2, tsize);
   4639        r1 = (r1 + 1) & 15;
   4640    }
   4641
   4642    tcg_temp_free_i64(tsize);
   4643    return DISAS_NEXT;
   4644}
   4645
   4646static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
   4647{
   4648    int r1 = get_field(s, r1);
   4649    int r3 = get_field(s, r3);
   4650    TCGv_i64 t = tcg_temp_new_i64();
   4651    TCGv_i64 t4 = tcg_const_i64(4);
   4652    TCGv_i64 t32 = tcg_const_i64(32);
   4653
   4654    while (1) {
   4655        tcg_gen_shl_i64(t, regs[r1], t32);
   4656        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
   4657        if (r1 == r3) {
   4658            break;
   4659        }
   4660        tcg_gen_add_i64(o->in2, o->in2, t4);
   4661        r1 = (r1 + 1) & 15;
   4662    }
   4663
   4664    tcg_temp_free_i64(t);
   4665    tcg_temp_free_i64(t4);
   4666    tcg_temp_free_i64(t32);
   4667    return DISAS_NEXT;
   4668}
   4669
/* STPQ: 128-bit store via helper; choose the serial or parallel variant
   depending on TB parallelism, or punt to exit_atomic when no 128-bit
   atomics are available. */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}
   4682
/* SRST: delegate to the srst helper with the r1/r2 field numbers;
   CC comes from the helper. */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4695
/* SRSTU: delegate to the srstu helper with the r1/r2 field numbers;
   CC comes from the helper. */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4708
/* Plain 64-bit subtract: out = in1 - in2. */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
   4714
/* 64-bit logical subtract; the double-word sub2 leaves the borrow
   (0 or -1) in cc_src for later CC computation. */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
   4721
/* Compute borrow (0, -1) into cc_src.  The switch cases below fall
   through deliberately: an arbitrary cc_op is first materialized into
   a CC value, the carry bit is then extracted, and finally carry (1,0)
   is converted to borrow (0,-1).  */
static void compute_borrow(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
   4743
/* 32-bit subtract with borrow: out = in1 - in2 + borrow, where the
   borrow (0 or -1) comes from the previous operation via cc_src.  */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}
   4753
/* 64-bit subtract with borrow, tracking the outgoing borrow in cc_src
   via 128-bit add/sub pairs.  */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}
   4769
/* SUPERVISOR CALL: record the SVC interruption code and instruction
   length into the CPU state, then raise the SVC exception.  */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* Make psw.addr and cc_op up to date before taking the exception.  */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
   4788
/* TEST ADDRESSING MODE: the CC encodes the 64-bit (bit 1) and 31-bit
   (bit 0) mode flags of the current translation block.  */
static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return DISAS_NEXT;
}
   4798
/* TEST DATA CLASS (short BFP): helper computes the CC directly.  */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (long BFP).  */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (extended BFP): the 128-bit operand arrives in
   out/out2.  */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4819
   4820#ifndef CONFIG_USER_ONLY
   4821
/* TEST BLOCK (system-only): helper does the work and sets the CC.  */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PROTECTION (system-only).  */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4835
   4836#endif
   4837
/* TEST DECIMAL: the l1 field encodes length - 1, so pass l1 + 1 bytes
   to the helper, which sets the CC.  */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4846
/* TRANSLATE: helper performs the table-driven byte translation.  */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4855
/* TRANSLATE EXTENDED: the helper returns a 128-bit result; the low
   half is fetched back via return_low128.  CC comes from the helper.  */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4863
/* TRANSLATE AND TEST: helper scans the operand and sets the CC.  */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST REVERSE: as op_trt, scanning via the reverse
   helper.  */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4881
/* TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT): one generator
   for all four forms; the low two opcode bits select the operand
   sizes and are passed to the helper.  */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    /* The m3 field is only honoured with the ETF2-enhancement facility.  */
    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        /* m3 bit set: disable the test-character comparison.  */
        tcg_gen_movi_i32(tst, -1);
    } else {
        /* Test character comes from r0, truncated to the width selected
           by the low opcode bits.  */
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4912
/* TEST AND SET: atomically exchange 0xff into the byte at the operand
   address; the CC is the leftmost bit of the original byte.  */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    /* Extract bit 7 (msb of the byte) of the old value as the CC.  */
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4922
/* UNPACK: helper converts packed to zoned format; no CC change here.  */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
   4930
/* UNPACK ASCII: length-checked in the translator, work done in the
   helper, which also sets the CC.  */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4947
/* UNPACK UNICODE: like op_unpka, with an even-length requirement and
   a 64-byte limit.  */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
   4964
   4965
/* EXCLUSIVE OR (character): XOR two storage operands of up to 256
   bytes.  When both operands name the same location, x ^ x == 0, so
   short lengths are open-coded as zero stores; otherwise defer to the
   helper, which also computes the CC.  */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* l encodes length - 1; emit progressively narrower zero
           stores covering the whole operand.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* The result is all zeroes, so the CC is always 0.  */
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}
   5018
/* Plain 64-bit XOR: out = in1 ^ in2; CC handled by the cout generators.  */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
   5024
/* XOR IMMEDIATE into a sub-field of a register.  The insn data packs
   the field position (low byte) and width (next byte); the immediate
   in in2 is shifted into place before the XOR.  */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 must be a writable temp, since we shift it in place.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
   5040
/* XOR immediate with storage.  With the interlocked-access facility
   the update is done atomically in memory; otherwise it is a plain
   load/xor/store sequence.  */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
   5061
/* Produce a zero output operand.  */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}

/* Produce a zero output pair; out2 aliases out, so mark it global-like
   to prevent a double free.  */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return DISAS_NEXT;
}
   5075
   5076#ifndef CONFIG_USER_ONLY
/* CLP (PCI, system-only): helper does the work and sets the CC.  */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   5086
/* PCI LOAD (PCILG, system-only): pass r1/r2 register numbers to the
   helper; CC comes from the helper.  */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE (PCISTG, system-only): same shape as op_pcilg.  */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   5110
/* STORE PCI FUNCTION CONTROLS (system-only): helper receives the
   register number, the operand address and the access register (b2).  */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
   5122
/* SIC (system-only): delegate entirely to the helper; no CC change.  */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
   5128
/* REFRESH PCI TRANSLATIONS (RPCIT, system-only): helper sets the CC.  */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
   5140
/* PCI STORE BLOCK (PCISTB, system-only): helper receives r1/r3, the
   operand address and the access register (b2), and sets the CC.  */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return DISAS_NEXT;
}
   5154
/* MODIFY PCI FUNCTION CONTROLS (system-only): same shape as
   op_stpcifc, different helper.  */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
   5166#endif
   5167
   5168#include "translate_vx.c.inc"
   5169
   5170/* ====================================================================== */
   5171/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   5172   the original inputs), update the various cc data structures in order to
   5173   be able to compute the new condition code.  */
   5174
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    /* The carry of a 32-bit unsigned add lands in bit 32 of the 64-bit
       result: split it into cc_src, with the 32-bit sum in cc_dst.  */
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    /* cc_src already holds the carry from the add generator.  */
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Only the low 32 bits participate in the zero test.  */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    /* A borrow propagates into the high half of the 64-bit result;
       the arithmetic shift yields the (0,-1) borrow indicator that
       CC_OP_SUBU expects in cc_src, with the 32-bit difference in
       cc_dst.  */
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    /* cc_src already holds the (0,-1) borrow from the sub generator.  */
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}

static void cout_muls32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
}

static void cout_muls64(DisasContext *s, DisasOps *o)
{
    /* out contains "high" part, out2 contains "low" part of 128 bit result */
    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
}
   5325
   5326/* ====================================================================== */
   5327/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   5328   with the TCG register to which we will write.  Used in combination with
   5329   the "wout" generators, in some cases we need a new temporary, and in
   5330   some cases we can write to a TCG global.  */
   5331
/* Fresh temporary for a single 64-bit output.  */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Fresh temporaries for a 128-bit (pair) output.  */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into the r1 global; g_out marks it as not-to-be-freed.  */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1:r1+1 (r1 must be
   even, enforced by the SPEC below).  */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
static void prep_x1(DisasContext *s, DisasOps *o)
{
    o->out = load_freg(get_field(s, r1));
    o->out2 = load_freg(get_field(s, r1) + 2);
}
#define SPEC_prep_x1 SPEC_r1_f128
   5368
   5369/* ====================================================================== */
   5370/* The "Write OUTput" generators.  These generally perform some non-trivial
   5371   copy of data to TCG globals, or to main memory.  The trivial cases are
   5372   generally handled by having a "prep" generator install the TCG global
   5373   as the destination of the operation.  */
   5374
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

/* Replace only the low 8 bits of r1.  */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Replace only the low 16 bits of r1.  */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store the 32-bit pair out/out2 into the even/odd registers r1:r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split the 64-bit out across the even/odd pair: low half to r1+1,
   high half to r1.  Note this clobbers o->out in the process.  */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

/* Store a 128-bit FP result into the f-register pair f1/f1+2.  */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* Conditional store: skipped entirely when r1 == r2.  */
static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
/* As wout_m1_16, but with an alignment-checked store.  */
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
/* As wout_m1_32, but with an alignment-checked store.  */
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
/* As wout_m1_64, but with an alignment-checked store.  */
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
   5547
   5548/* ====================================================================== */
   5549/* The "INput 1" generators.  These load the first operand to an insn.  */
   5550
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

/* As in1_r1, but alias the register global instead of copying;
   g_in1 prevents the temp from being freed.  */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of r1, shifted down.  */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Odd register of the even/odd pair r1:r1+1.  */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
    o->g_in1 = true;
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* 64-bit value assembled from the even/odd pair: r1 is the high half,
   r1+1 the low half.  */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

/* Compute the b1/d1 effective address into addr1 (no memory access).  */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

/* Compute the x2/b2/d2 effective address into addr1.  */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

/* The in1_m1_* variants compute the operand-1 address and then load
   from it with the indicated width and extension.  */
static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
   5760
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */

/* Alias the global TCG temp for r1; g_in2 prevents it being freed. */
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

/* Zero-extend the low 16 bits of r1. */
static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

/* Zero-extend the low 32 bits of r1. */
static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

/* Concatenate the even/odd register pair r1/r1+1 into one 64-bit
   value (r1 high, r1+1 low); r1 must be even (SPEC_r1_even). */
static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

/* Load the full 64-bit value of general register r2. */
static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

/* Alias the global TCG temp for r2; g_in2 prevents it being freed. */
static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Load r2 only when the field is non-zero; otherwise o->in2 stays
   NULL and the insn's op handler must cope with that. */
static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

/* Sign- or zero-extended views of r2/r3; the _sr32 variants take the
   high 32 bits by shifting right. */
static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

/* Load the short (32-bit) FP register r2. */
static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

/* Load the long (64-bit) FP register r2. */
static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

/* Register value used as an address: copy r2 through the
   addressing-mode wrap (gen_addi_and_wrap_i64 with offset 0). */
static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0

/* Effective address x2 + b2 + d2; x2 may be absent from the format. */
static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: insn address + 2 * signed i2 halfwords. */
static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift counts, masked to 31 or 63 bits by help_l2_shift. */
static void in2_sh32(DisasContext *s, DisasOps *o)
{
    help_l2_shift(s, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasOps *o)
{
    help_l2_shift(s, o, 63);
}
#define SPEC_in2_sh64 0
   5937
/* The in2_m2_* generators load in2 from memory at the a2 effective
   address; the suffix gives the access size and extension.  Note the
   address temp in o->in2 is reused to hold the loaded value. */
static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
/* As in2_m2_32u, but with an alignment check (system mode only). */
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* 64-bit load whose result is itself an address: wrap it through
   gen_addi_and_wrap_i64 for the current addressing mode. */
static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
/* As in2_m2_64, but with an alignment check (system mode only). */
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

/* The in2_mri2_* generators load from a PC-relative address. */
static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

/* Immediate operands: i2 as-is, or truncated to 8/16/32 bits. */
static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

/* Truncated immediate shifted left by the per-insn data amount. */
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* The raw left-aligned instruction image saved by extract_insn. */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
   6079
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

/* C/D/F are thin wrappers over E that default the data (D) and flags
   (FL) arguments; insn-data.def entries use whichever form fits. */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

/* First expansion of insn-data.def: one enumerator per insn, used
   below to index insn_info[]. */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: build the DisasInsn descriptor for each insn,
   wiring in the in1/in2/prep/wout/cout/op helper functions and the
   OR of their SPEC_* specification-check bits. */
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
 },

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

/* Give smaller names to the various facilities.  */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
#define FAC_VE          S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};
   6179
/* Third expansion: one switch case per opcode, each returning the
   matching descriptor from insn_info[]. */
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a combined major/minor opcode to its DisasInsn descriptor,
   or NULL when the opcode is unknown. */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

/* Done expanding insn-data.def; retire the helper macros. */
#undef F
#undef E
#undef D
#undef C
   6197
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principals of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-sized field is simply not present in this format. */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Sign-extend R from f->size bits with the xor/subtract trick. */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The encoding stores the low 12 bits (dl) before the high
           8 bits (dh); reassemble and sign-extend via int8_t. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        /* The 5th (most significant) bit of this 4-bit register field
           is stored separately in the RXB byte at insn bits 36-39;
           which RXB bit applies depends on the field's position. */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
   6258
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        /* Read the first halfword; the top two bits of the major
           opcode determine the total instruction length (2/4/6). */
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        /* Left-align the full insn image in the 64-bit word, as
           extract_field expects. */
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
   6362
   6363static bool is_afp_reg(int reg)
   6364{
   6365    return reg % 2 || reg > 6;
   6366}
   6367
   6368static bool is_fp_pair(int reg)
   6369{
   6370    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
   6371    return !(reg & 0x2);
   6372}
   6373
/*
 * Decode and translate the single instruction at s->base.pc_next.
 * Returns the resulting DisasJumpType; DISAS_NORETURN means an
 * exception was generated.  On return, s->base.pc_next has been
 * advanced past the instruction.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN.  */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    /* With PER tracing active, report the instruction fetch. */
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            /* Data-exception code: 1 = AFP register, 2 = BFP insn,
               3 = DFP insn, 0xfe = vector insn; 0 = no exception. */
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
            if (icount) {
                gen_io_start();
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    /* Skip write-back when the op already raised an exception. */
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers.  The g_* flags
       mark operands that alias global TCG temps and must survive. */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_PC_STALE;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
   6534
   6535static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
   6536{
   6537    DisasContext *dc = container_of(dcbase, DisasContext, base);
   6538
   6539    /* 31-bit mode */
   6540    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
   6541        dc->base.pc_first &= 0x7fffffff;
   6542        dc->base.pc_next = dc->base.pc_first;
   6543    }
   6544
   6545    dc->cc_op = CC_OP_DYNAMIC;
   6546    dc->ex_value = dc->base.tb->cs_base;
   6547    dc->do_debug = dc->base.singlestep_enabled;
   6548}
   6549
/* Translator hook: no extra work is needed at the start of a TB. */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
   6553
/*
 * Translator hook: emit the insn_start op for the next instruction.
 * The third argument (the insn length) is not known until the insn
 * has been decoded, so remember the op here; translate_one patches
 * the real length in via tcg_set_insn_start_param.
 */
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay the set of ilen until we've read the insn. */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
    dc->insn_start = tcg_last_op();
}
   6562
   6563static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
   6564{
   6565    CPUS390XState *env = cs->env_ptr;
   6566    DisasContext *dc = container_of(dcbase, DisasContext, base);
   6567
   6568    dc->base.is_jmp = translate_one(env, dc);
   6569    if (dc->base.is_jmp == DISAS_NEXT) {
   6570        uint64_t page_start;
   6571
   6572        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
   6573        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
   6574            dc->base.is_jmp = DISAS_TOO_MANY;
   6575        }
   6576    }
   6577}
   6578
/*
 * Translator hook: finalize the TB.  Depending on the final is_jmp
 * disposition, flush the PSW address and/or cc state back to env and
 * emit the TB-exiting opcode.
 */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        /* The exit has already been emitted; nothing left to do. */
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if ((dc->base.tb->flags & FLAG_MASK_PER) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            /* No TB chaining with PER enabled or NOCHAIN requested. */
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
   6612
   6613static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
   6614{
   6615    DisasContext *dc = container_of(dcbase, DisasContext, base);
   6616
   6617    if (unlikely(dc->ex_value)) {
   6618        /* ??? Unfortunately log_target_disas can't use host memory.  */
   6619        qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
   6620    } else {
   6621        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
   6622        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
   6623    }
   6624}
   6625
   6626static const TranslatorOps s390x_tr_ops = {
   6627    .init_disas_context = s390x_tr_init_disas_context,
   6628    .tb_start           = s390x_tr_tb_start,
   6629    .insn_start         = s390x_tr_insn_start,
   6630    .translate_insn     = s390x_tr_translate_insn,
   6631    .tb_stop            = s390x_tr_tb_stop,
   6632    .disas_log          = s390x_tr_disas_log,
   6633};
   6634
   6635void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
   6636{
   6637    DisasContext dc;
   6638
   6639    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
   6640}
   6641
   6642void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
   6643                          target_ulong *data)
   6644{
   6645    int cc_op = data[1];
   6646
   6647    env->psw.addr = data[0];
   6648
   6649    /* Update the CC opcode if it is not already up-to-date.  */
   6650    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
   6651        env->cc_op = cc_op;
   6652    }
   6653
   6654    /* Record ILEN.  */
   6655    env->int_pgm_ilen = data[2];
   6656}