cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

tcg.h


/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef TCG_H
#define TCG_H

#include "cpu.h"
#include "exec/memop.h"
#include "exec/memopidx.h"
#include "qemu/bitops.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "tcg/tcg-mo.h"
#include "tcg-target.h"
#include "qemu/int128.h"
#include "tcg/tcg-cond.h"

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 6
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
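
/*
 * Editorial illustration (not part of the upstream header): with
 * MAX_OPC_PARAM_IARGS = 6 and MAX_OPC_PARAM_OARGS = 1 above,
 * MAX_OPC_PARAM_ARGS is 7, so MAX_OPC_PARAM evaluates to
 * 4 + 2 * 7 = 18 on 32-bit hosts and 4 + 1 * 7 = 11 on 64-bit hosts.
 */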

#define CPU_TEMP_BUF_NLONGS 128
#define TCG_STATIC_FRAME_SIZE  (CPU_TEMP_BUF_NLONGS * sizeof(long))

/* Default target word size to pointer size.  */
#ifndef TCG_TARGET_REG_BITS
# if UINTPTR_MAX == UINT32_MAX
#  define TCG_TARGET_REG_BITS 32
# elif UINTPTR_MAX == UINT64_MAX
#  define TCG_TARGET_REG_BITS 64
# else
#  error Unknown pointer size for tcg target
# endif
#endif

#if TCG_TARGET_REG_BITS == 32
typedef int32_t tcg_target_long;
typedef uint32_t tcg_target_ulong;
#define TCG_PRIlx PRIx32
#define TCG_PRIld PRId32
#elif TCG_TARGET_REG_BITS == 64
typedef int64_t tcg_target_long;
typedef uint64_t tcg_target_ulong;
#define TCG_PRIlx PRIx64
#define TCG_PRIld PRId64
#else
#error unsupported
#endif

/* Oversized TCG guests make things like MTTCG hard
 * as we can't use atomics for cputlb updates.
 */
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
#define TCG_OVERSIZED_GUEST 1
#else
#define TCG_OVERSIZED_GUEST 0
#endif

#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
typedef uint64_t TCGRegSet;
#else
#error unsupported
#endif

#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros.  */
#define TCG_TARGET_HAS_extrl_i64_i32    0
#define TCG_TARGET_HAS_extrh_i64_i32    0
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#define TCG_TARGET_HAS_div2_i64         0
#define TCG_TARGET_HAS_rot_i64          0
#define TCG_TARGET_HAS_ext8s_i64        0
#define TCG_TARGET_HAS_ext16s_i64       0
#define TCG_TARGET_HAS_ext32s_i64       0
#define TCG_TARGET_HAS_ext8u_i64        0
#define TCG_TARGET_HAS_ext16u_i64       0
#define TCG_TARGET_HAS_ext32u_i64       0
#define TCG_TARGET_HAS_bswap16_i64      0
#define TCG_TARGET_HAS_bswap32_i64      0
#define TCG_TARGET_HAS_bswap64_i64      0
#define TCG_TARGET_HAS_neg_i64          0
#define TCG_TARGET_HAS_not_i64          0
#define TCG_TARGET_HAS_andc_i64         0
#define TCG_TARGET_HAS_orc_i64          0
#define TCG_TARGET_HAS_eqv_i64          0
#define TCG_TARGET_HAS_nand_i64         0
#define TCG_TARGET_HAS_nor_i64          0
#define TCG_TARGET_HAS_clz_i64          0
#define TCG_TARGET_HAS_ctz_i64          0
#define TCG_TARGET_HAS_ctpop_i64        0
#define TCG_TARGET_HAS_deposit_i64      0
#define TCG_TARGET_HAS_extract_i64      0
#define TCG_TARGET_HAS_sextract_i64     0
#define TCG_TARGET_HAS_extract2_i64     0
#define TCG_TARGET_HAS_movcond_i64      0
#define TCG_TARGET_HAS_add2_i64         0
#define TCG_TARGET_HAS_sub2_i64         0
#define TCG_TARGET_HAS_mulu2_i64        0
#define TCG_TARGET_HAS_muls2_i64        0
#define TCG_TARGET_HAS_muluh_i64        0
#define TCG_TARGET_HAS_mulsh_i64        0
/* Turn some undef macros into true macros.  */
#define TCG_TARGET_HAS_add2_i32         1
#define TCG_TARGET_HAS_sub2_i32         1
#endif

#ifndef TCG_TARGET_deposit_i32_valid
#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_deposit_i64_valid
#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i32_valid
#define TCG_TARGET_extract_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i64_valid
#define TCG_TARGET_extract_i64_valid(ofs, len) 1
#endif

/* Only one of DIV or DIV2 should be defined.  */
#if defined(TCG_TARGET_HAS_div_i32)
#define TCG_TARGET_HAS_div2_i32         0
#elif defined(TCG_TARGET_HAS_div2_i32)
#define TCG_TARGET_HAS_div_i32          0
#define TCG_TARGET_HAS_rem_i32          0
#endif
#if defined(TCG_TARGET_HAS_div_i64)
#define TCG_TARGET_HAS_div2_i64         0
#elif defined(TCG_TARGET_HAS_div2_i64)
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#endif

/* For 32-bit targets, some sort of unsigned widening multiply is required.  */
#if TCG_TARGET_REG_BITS == 32 \
    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
         || defined(TCG_TARGET_HAS_muluh_i32))
# error "Missing unsigned widening multiply"
#endif

#if !defined(TCG_TARGET_HAS_v64) \
    && !defined(TCG_TARGET_HAS_v128) \
    && !defined(TCG_TARGET_HAS_v256)
#define TCG_TARGET_MAYBE_vec            0
#define TCG_TARGET_HAS_abs_vec          0
#define TCG_TARGET_HAS_neg_vec          0
#define TCG_TARGET_HAS_not_vec          0
#define TCG_TARGET_HAS_andc_vec         0
#define TCG_TARGET_HAS_orc_vec          0
#define TCG_TARGET_HAS_roti_vec         0
#define TCG_TARGET_HAS_rots_vec         0
#define TCG_TARGET_HAS_rotv_vec         0
#define TCG_TARGET_HAS_shi_vec          0
#define TCG_TARGET_HAS_shs_vec          0
#define TCG_TARGET_HAS_shv_vec          0
#define TCG_TARGET_HAS_mul_vec          0
#define TCG_TARGET_HAS_sat_vec          0
#define TCG_TARGET_HAS_minmax_vec       0
#define TCG_TARGET_HAS_bitsel_vec       0
#define TCG_TARGET_HAS_cmpsel_vec       0
#else
#define TCG_TARGET_MAYBE_vec            1
#endif
#ifndef TCG_TARGET_HAS_v64
#define TCG_TARGET_HAS_v64              0
#endif
#ifndef TCG_TARGET_HAS_v128
#define TCG_TARGET_HAS_v128             0
#endif
#ifndef TCG_TARGET_HAS_v256
#define TCG_TARGET_HAS_v256             0
#endif

#ifndef TARGET_INSN_START_EXTRA_WORDS
# define TARGET_INSN_START_WORDS 1
#else
# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
#endif

typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg/tcg-opc.h"
#undef DEF
    NB_OPS,
} TCGOpcode;

#define tcg_regset_set_reg(d, r)   ((d) |= (TCGRegSet)1 << (r))
#define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
#define tcg_regset_test_reg(d, r)  (((d) >> (r)) & 1)

#ifndef TCG_TARGET_INSN_UNIT_SIZE
# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
#elif TCG_TARGET_INSN_UNIT_SIZE == 1
typedef uint8_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 2
typedef uint16_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 4
typedef uint32_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 8
typedef uint64_t tcg_insn_unit;
#else
/* The port better have done this.  */
#endif


#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
# define tcg_debug_assert(X) do { assert(X); } while (0)
#else
# define tcg_debug_assert(X) \
    do { if (!(X)) { __builtin_unreachable(); } } while (0)
#endif

typedef struct TCGRelocation TCGRelocation;
struct TCGRelocation {
    QSIMPLEQ_ENTRY(TCGRelocation) next;
    tcg_insn_unit *ptr;
    intptr_t addend;
    int type;
};

typedef struct TCGLabel TCGLabel;
struct TCGLabel {
    unsigned present : 1;
    unsigned has_value : 1;
    unsigned id : 14;
    unsigned refs : 16;
    union {
        uintptr_t value;
        const tcg_insn_unit *value_ptr;
    } u;
    QSIMPLEQ_HEAD(, TCGRelocation) relocs;
    QSIMPLEQ_ENTRY(TCGLabel) next;
};

typedef struct TCGPool {
    struct TCGPool *next;
    int size;
    uint8_t data[] __attribute__ ((aligned));
} TCGPool;

#define TCG_POOL_CHUNK_SIZE 32768

#define TCG_MAX_TEMPS 512
#define TCG_MAX_INSNS 512

/* when the size of the arguments of a called function is smaller than
   this value, they are statically allocated in the TB stack frame */
#define TCG_STATIC_CALL_ARGS_SIZE 128

typedef enum TCGType {
    TCG_TYPE_I32,
    TCG_TYPE_I64,

    TCG_TYPE_V64,
    TCG_TYPE_V128,
    TCG_TYPE_V256,

    TCG_TYPE_COUNT, /* number of different types */

    /* An alias for the size of the host register.  */
#if TCG_TARGET_REG_BITS == 32
    TCG_TYPE_REG = TCG_TYPE_I32,
#else
    TCG_TYPE_REG = TCG_TYPE_I64,
#endif

    /* An alias for the size of the native pointer.  */
#if UINTPTR_MAX == UINT32_MAX
    TCG_TYPE_PTR = TCG_TYPE_I32,
#else
    TCG_TYPE_PTR = TCG_TYPE_I64,
#endif

    /* An alias for the size of the target "long", aka register.  */
#if TARGET_LONG_BITS == 64
    TCG_TYPE_TL = TCG_TYPE_I64,
#else
    TCG_TYPE_TL = TCG_TYPE_I32,
#endif
} TCGType;

/**
 * get_alignment_bits
 * @memop: MemOp value
 *
 * Extract the alignment size from the memop.
 */
static inline unsigned get_alignment_bits(MemOp memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        /* No alignment required.  */
        a = 0;
    } else if (a == MO_ALIGN) {
        /* A natural alignment requirement.  */
        a = memop & MO_SIZE;
    } else {
        /* A specific alignment requirement.  */
        a = a >> MO_ASHIFT;
    }
#if defined(CONFIG_SOFTMMU)
    /* The requested alignment cannot overlap the TLB flags.  */
    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
#endif
    return a;
}
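
/*
 * Illustrative sketch (added for exposition, not in the upstream header;
 * MO_ALIGN_2 and friends come from "exec/memop.h"):
 */
static inline void get_alignment_bits_example(void)
{
    /* Natural alignment of a 32-bit access: log2(4) == 2. */
    tcg_debug_assert(get_alignment_bits(MO_32 | MO_ALIGN) == 2);
    /* An explicit 2-byte alignment on a 64-bit access: log2(2) == 1. */
    tcg_debug_assert(get_alignment_bits(MO_64 | MO_ALIGN_2) == 1);
    /* MO_UNALN (the default): no alignment required. */
    tcg_debug_assert(get_alignment_bits(MO_16) == 0);
}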

typedef tcg_target_ulong TCGArg;

/* Define type and accessor macros for TCG variables.

   TCG variables are the inputs and outputs of TCG ops, as described
   in tcg/README. Target CPU front-end code uses these types to deal
   with TCG variables as it emits TCG code via the tcg_gen_* functions.
   They come in several flavours:
    * TCGv_i32 : 32 bit integer type
    * TCGv_i64 : 64 bit integer type
    * TCGv_ptr : a host pointer type
    * TCGv_vec : a host vector type; the exact size is not exposed
                 to the CPU front-end code.
    * TCGv : an integer type the same size as target_ulong
             (an alias for either TCGv_i32 or TCGv_i64)
   The compiler's type checking will complain if you mix them
   up and pass the wrong sized TCGv to a function.

   Users of tcg_gen_* don't need to know about any of the internal
   details of these, and should treat them as opaque types.
   You won't be able to look inside them in a debugger either.

   Internal implementation details follow:

   Note that there is no definition of the structs TCGv_i32_d etc anywhere.
   This is deliberate, because the values we store in variables of type
   TCGv_i32 are not really pointers-to-structures. They're just small
   integers, but keeping them in pointer types like this means that the
   compiler will complain if you accidentally pass a TCGv_i32 to a
   function which takes a TCGv_i64, and so on. Only the internals of
   TCG need to care about the actual contents of the types.  */

typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
typedef struct TCGv_ptr_d *TCGv_ptr;
typedef struct TCGv_vec_d *TCGv_vec;
typedef TCGv_ptr TCGv_env;
#if TARGET_LONG_BITS == 32
#define TCGv TCGv_i32
#elif TARGET_LONG_BITS == 64
#define TCGv TCGv_i64
#else
#error Unhandled TARGET_LONG_BITS value
#endif
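
/*
 * Usage sketch (illustration only; the tcg_gen_* emitters are declared in
 * "tcg/tcg-op.h", not here):
 *
 *     TCGv_i32 lo = tcg_temp_new_i32();
 *     TCGv_i64 wide = tcg_temp_new_i64();
 *     tcg_gen_movi_i32(lo, 0x1234);
 *     tcg_gen_extu_i32_i64(wide, lo);   // explicit widening is fine
 *     // tcg_gen_mov_i64(wide, lo);     // rejected: TCGv_i32 is not TCGv_i64
 */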

/* call flags */
/* Helper does not read globals (either directly or through an exception). It
   implies TCG_CALL_NO_WRITE_GLOBALS. */
#define TCG_CALL_NO_READ_GLOBALS    0x0001
/* Helper does not write globals */
#define TCG_CALL_NO_WRITE_GLOBALS   0x0002
/* Helper can be safely suppressed if the return value is not used. */
#define TCG_CALL_NO_SIDE_EFFECTS    0x0004
/* Helper is QEMU_NORETURN.  */
#define TCG_CALL_NO_RETURN          0x0008

/* convenience version of most used call flags */
#define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
#define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
#define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
#define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
#define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)

/* Used to align parameters.  See the comment before tcgv_i32_temp.  */
#define TCG_CALL_DUMMY_ARG      ((TCGArg)0)

/*
 * Flags for the bswap opcodes.
 * If IZ, the input is zero-extended, otherwise unknown.
 * If OZ or OS, the output is zero- or sign-extended respectively,
 * otherwise the high bits are undefined.
 */
enum {
    TCG_BSWAP_IZ = 1,
    TCG_BSWAP_OZ = 2,
    TCG_BSWAP_OS = 4,
};
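
/*
 * Example (illustration only; tcg_gen_bswap16_i32 is declared in
 * "tcg/tcg-op.h"): swap the low 16 bits of a value already known to be
 * zero-extended, and zero-extend the result:
 *
 *     tcg_gen_bswap16_i32(dst, src, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
 *
 * With TCG_BSWAP_OS instead, the result would be sign-extended from bit 15.
 */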

typedef enum TCGTempVal {
    TEMP_VAL_DEAD,
    TEMP_VAL_REG,
    TEMP_VAL_MEM,
    TEMP_VAL_CONST,
} TCGTempVal;

typedef enum TCGTempKind {
    /* Temp is dead at the end of all basic blocks. */
    TEMP_NORMAL,
    /* Temp is saved across basic blocks but dead at the end of TBs. */
    TEMP_LOCAL,
    /* Temp is saved across both basic blocks and translation blocks. */
    TEMP_GLOBAL,
    /* Temp is in a fixed register. */
    TEMP_FIXED,
    /* Temp is a fixed constant. */
    TEMP_CONST,
} TCGTempKind;

typedef struct TCGTemp {
    TCGReg reg:8;
    TCGTempVal val_type:8;
    TCGType base_type:8;
    TCGType type:8;
    TCGTempKind kind:3;
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
    unsigned int mem_coherent:1;
    unsigned int mem_allocated:1;
    unsigned int temp_allocated:1;

    int64_t val;
    struct TCGTemp *mem_base;
    intptr_t mem_offset;
    const char *name;

    /* Pass-specific information that can be stored for a temporary.
       One word worth of integer data, and one pointer to data
       allocated separately.  */
    uintptr_t state;
    void *state_ptr;
} TCGTemp;

typedef struct TCGContext TCGContext;

typedef struct TCGTempSet {
    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;

/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
   this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
   There are never more than 2 outputs, which means that we can store all
   dead + sync data within 16 bits.  */
#define DEAD_ARG  4
#define SYNC_ARG  1
typedef uint16_t TCGLifeData;

/* The layout here is designed to avoid a bitfield crossing of
   a 32-bit boundary, which would cause GCC to add extra padding.  */
typedef struct TCGOp {
    TCGOpcode opc   : 8;        /*  8 */

    /* Parameters for this opcode.  See below.  */
    unsigned param1 : 4;        /* 12 */
    unsigned param2 : 4;        /* 16 */

    /* Lifetime data of the operands.  */
    unsigned life   : 16;       /* 32 */

    /* Next and previous opcodes.  */
    QTAILQ_ENTRY(TCGOp) link;

    /* Arguments for the opcode.  */
    TCGArg args[MAX_OPC_PARAM];

    /* Register preferences for the output(s).  */
    TCGRegSet output_pref[2];
} TCGOp;

#define TCGOP_CALLI(X)    (X)->param1
#define TCGOP_CALLO(X)    (X)->param2

#define TCGOP_VECL(X)     (X)->param1
#define TCGOP_VECE(X)     (X)->param2

/* Make sure operands fit in the bitfields above.  */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));

typedef struct TCGProfile {
    int64_t cpu_exec_time;
    int64_t tb_count1;
    int64_t tb_count;
    int64_t op_count; /* total insn count */
    int op_count_max; /* max insn per TB */
    int temp_count_max;
    int64_t temp_count;
    int64_t del_op_count;
    int64_t code_in_len;
    int64_t code_out_len;
    int64_t search_out_len;
    int64_t interm_time;
    int64_t code_time;
    int64_t la_time;
    int64_t opt_time;
    int64_t restore_count;
    int64_t restore_time;
    int64_t table_op_count[NB_OPS];
} TCGProfile;

struct TCGContext {
    uint8_t *pool_cur, *pool_end;
    TCGPool *pool_first, *pool_current, *pool_first_large;
    int nb_labels;
    int nb_globals;
    int nb_temps;
    int nb_indirects;
    int nb_ops;

    /* goto_tb support */
    tcg_insn_unit *code_buf;
    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
    uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */

    TCGRegSet reserved_regs;
    uint32_t tb_cflags; /* cflags of the current TB */
    intptr_t current_frame_offset;
    intptr_t frame_start;
    intptr_t frame_end;
    TCGTemp *frame_temp;

    tcg_insn_unit *code_ptr;

#ifdef CONFIG_PROFILER
    TCGProfile prof;
#endif

#ifdef CONFIG_DEBUG_TCG
    int temps_in_use;
    int goto_tb_issue_mask;
    const TCGOpcode *vecop_list;
#endif

    /* Code generation.  Note that we specifically do not use tcg_insn_unit
       here, because there's too much arithmetic throughout that relies
       on addition and subtraction working on bytes.  Rely on the GCC
       extension that allows arithmetic on void*.  */
    void *code_gen_buffer;
    size_t code_gen_buffer_size;
    void *code_gen_ptr;
    void *data_gen_ptr;

    /* Threshold to flush the translated code buffer.  */
    void *code_gen_highwater;

    /* Track which vCPU triggers events */
    CPUState *cpu;                      /* *_trans */

    /* These structures are private to tcg-target.c.inc.  */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels;
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    struct TCGLabelPoolData *pool_labels;
#endif

    TCGLabel *exitreq_label;

#ifdef CONFIG_PLUGIN
    /*
     * We keep one plugin_tb struct per TCGContext. Note that on every TB
     * translation we clear but do not free its contents; this way we
     * avoid a lot of malloc/free churn, since after a few TB's it's
     * unlikely that we'll need to allocate either more instructions or more
     * space for instructions (for variable-instruction-length ISAs).
     */
    struct qemu_plugin_tb *plugin_tb;

    /* descriptor of the instruction being translated */
    struct qemu_plugin_insn *plugin_insn;
#endif

    GHashTable *const_table[TCG_TYPE_COUNT];
    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

    QTAILQ_HEAD(, TCGOp) ops, free_ops;
    QSIMPLEQ_HEAD(, TCGLabel) labels;

    /* Tells which temporary holds a given register.
       It does not take into account fixed registers */
    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];

    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];

    /* Exit to translator on overflow. */
    sigjmp_buf jmp_trans;
};

static inline bool temp_readonly(TCGTemp *ts)
{
    return ts->kind >= TEMP_FIXED;
}

extern __thread TCGContext *tcg_ctx;
extern const void *tcg_code_gen_epilogue;
extern uintptr_t tcg_splitwx_diff;
extern TCGv_env cpu_env;

bool in_code_gen_buffer(const void *p);

#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw);
void *tcg_splitwx_to_rw(const void *rx);
#else
static inline const void *tcg_splitwx_to_rx(void *rw)
{
    return rw ? rw + tcg_splitwx_diff : NULL;
}

static inline void *tcg_splitwx_to_rw(const void *rx)
{
    return rx ? (void *)rx - tcg_splitwx_diff : NULL;
}
#endif

static inline size_t temp_idx(TCGTemp *ts)
{
    ptrdiff_t n = ts - tcg_ctx->temps;
    tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
    return n;
}

static inline TCGArg temp_arg(TCGTemp *ts)
{
    return (uintptr_t)ts;
}

static inline TCGTemp *arg_temp(TCGArg a)
{
    return (TCGTemp *)(uintptr_t)a;
}

/* Using the offset of a temporary, relative to TCGContext, rather than
   its index means that we don't use 0.  That leaves offset 0 free for
   a NULL representation without having to leave index 0 unused.  */
static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
{
    uintptr_t o = (uintptr_t)v;
    TCGTemp *t = (void *)tcg_ctx + o;
    tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
    return t;
}

static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGTemp *tcgv_vec_temp(TCGv_vec v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
{
    return temp_arg(tcgv_i32_temp(v));
}

static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
{
    return temp_arg(tcgv_i64_temp(v));
}

static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
{
    return temp_arg(tcgv_ptr_temp(v));
}

static inline TCGArg tcgv_vec_arg(TCGv_vec v)
{
    return temp_arg(tcgv_vec_temp(v));
}

static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
{
    (void)temp_idx(t); /* trigger embedded assert */
    return (TCGv_i32)((void *)t - (void *)tcg_ctx);
}

static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
{
    return (TCGv_i64)temp_tcgv_i32(t);
}

static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
{
    return (TCGv_ptr)temp_tcgv_i32(t);
}

static inline TCGv_vec temp_tcgv_vec(TCGTemp *t)
{
    return (TCGv_vec)temp_tcgv_i32(t);
}
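
/*
 * Round-trip sketch (illustration only): because a TCGv_* value is just the
 * temp's byte offset within TCGContext, the conversions above invert one
 * another:
 *
 *     TCGTemp *ts = tcgv_i32_temp(v);
 *     tcg_debug_assert(temp_tcgv_i32(ts) == v);
 *     tcg_debug_assert(arg_temp(tcgv_i32_arg(v)) == ts);
 */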

#if TCG_TARGET_REG_BITS == 32
static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t));
}

static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
}
#endif

static inline TCGArg tcg_get_insn_param(TCGOp *op, int arg)
{
    return op->args[arg];
}

static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
{
    op->args[arg] = v;
}

static inline target_ulong tcg_get_insn_start_param(TCGOp *op, int arg)
{
#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    return tcg_get_insn_param(op, arg);
#else
    return tcg_get_insn_param(op, arg * 2) |
           ((uint64_t)tcg_get_insn_param(op, arg * 2 + 1) << 32);
#endif
}

static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v)
{
#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    tcg_set_insn_param(op, arg, v);
#else
    tcg_set_insn_param(op, arg * 2, v);
    tcg_set_insn_param(op, arg * 2 + 1, v >> 32);
#endif
}

/* The last op that was emitted.  */
static inline TCGOp *tcg_last_op(void)
{
    return QTAILQ_LAST(&tcg_ctx->ops);
}

/* Test for whether to terminate the TB for using too many opcodes.  */
static inline bool tcg_op_buf_full(void)
{
    /* This is not a hard limit, it merely stops translation when
     * we have produced "enough" opcodes.  We want to limit TB size
     * such that a RISC host can reasonably use a 16-bit signed
     * branch within the TB.  We also need to be mindful of the
     * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[]
     * and TCGContext.gen_insn_end_off[].
     */
    return tcg_ctx->nb_ops >= 4000;
}

/* pool based memory allocation */

/* user-mode: mmap_lock must be held for tcg_malloc_internal. */
void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
TranslationBlock *tcg_tb_alloc(TCGContext *s);

void tcg_region_reset_all(void);

size_t tcg_code_size(void);
size_t tcg_code_capacity(void);

void tcg_tb_insert(TranslationBlock *tb);
void tcg_tb_remove(TranslationBlock *tb);
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
size_t tcg_nb_tbs(void);

/* user-mode: Called with mmap_lock held.  */
static inline void *tcg_malloc(int size)
{
    TCGContext *s = tcg_ctx;
    uint8_t *ptr, *ptr_end;

    /* ??? This is a weak placeholder for minimum malloc alignment.  */
    size = QEMU_ALIGN_UP(size, 8);

    ptr = s->pool_cur;
    ptr_end = ptr + size;
    if (unlikely(ptr_end > s->pool_end)) {
        return tcg_malloc_internal(tcg_ctx, size);
    } else {
        s->pool_cur = ptr_end;
        return ptr;
    }
}
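
/*
 * Usage sketch (illustration only): pool memory is bump-allocated and
 * reclaimed wholesale by tcg_pool_reset(); there is no per-allocation free.
 * The element type here is arbitrary:
 *
 *     TCGArgConstraint *cts = tcg_malloc(n * sizeof(*cts));
 */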

void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus);
void tcg_register_thread(void);
void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);

int tcg_gen_code(TCGContext *s, TranslationBlock *tb);

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);

TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
                                     intptr_t, const char *);
TCGTemp *tcg_temp_new_internal(TCGType, bool);
void tcg_temp_free_internal(TCGTemp *);
TCGv_vec tcg_temp_new_vec(TCGType type);
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);

static inline void tcg_temp_free_i32(TCGv_i32 arg)
{
    tcg_temp_free_internal(tcgv_i32_temp(arg));
}

static inline void tcg_temp_free_i64(TCGv_i64 arg)
{
    tcg_temp_free_internal(tcgv_i64_temp(arg));
}

static inline void tcg_temp_free_ptr(TCGv_ptr arg)
{
    tcg_temp_free_internal(tcgv_ptr_temp(arg));
}

static inline void tcg_temp_free_vec(TCGv_vec arg)
{
    tcg_temp_free_internal(tcgv_vec_temp(arg));
}

static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
    return temp_tcgv_i32(t);
}

static inline TCGv_i32 tcg_temp_new_i32(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, false);
    return temp_tcgv_i32(t);
}

static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, true);
    return temp_tcgv_i32(t);
}

static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
    return temp_tcgv_i64(t);
}

static inline TCGv_i64 tcg_temp_new_i64(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, false);
    return temp_tcgv_i64(t);
}

static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, true);
    return temp_tcgv_i64(t);
}

static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_PTR, reg, offset, name);
    return temp_tcgv_ptr(t);
}

static inline TCGv_ptr tcg_temp_new_ptr(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, false);
    return temp_tcgv_ptr(t);
}

static inline TCGv_ptr tcg_temp_local_new_ptr(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, true);
    return temp_tcgv_ptr(t);
}

#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of
 * code which is not supposed to leak any TCG temporaries, then
 * calling tcg_check_temp_count() at the end of the section will
 * return 1 if the section did in fact leak a temporary.
 */
void tcg_clear_temp_count(void);
int tcg_check_temp_count(void);
#else
#define tcg_clear_temp_count() do { } while (0)
#define tcg_check_temp_count() 0
#endif

int64_t tcg_cpu_exec_time(void);
void tcg_dump_info(void);
void tcg_dump_op_count(void);

#define TCG_CT_CONST  1 /* any constant of register size */

typedef struct TCGArgConstraint {
    unsigned ct : 16;
    unsigned alias_index : 4;
    unsigned sort_index : 4;
    bool oalias : 1;
    bool ialias : 1;
    bool newreg : 1;
    TCGRegSet regs;
} TCGArgConstraint;

#define TCG_MAX_OP_ARGS 16

/* Bits for TCGOpDef->flags, 8 bits available, all used.  */
enum {
    /* Instruction exits the translation block.  */
    TCG_OPF_BB_EXIT      = 0x01,
    /* Instruction defines the end of a basic block.  */
    TCG_OPF_BB_END       = 0x02,
    /* Instruction clobbers call registers and potentially updates globals.  */
    TCG_OPF_CALL_CLOBBER = 0x04,
    /* Instruction has side effects: it cannot be removed if its outputs
       are not used, and might trigger exceptions.  */
    TCG_OPF_SIDE_EFFECTS = 0x08,
    /* Instruction operands are 64-bits (otherwise 32-bits).  */
    TCG_OPF_64BIT        = 0x10,
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemented by the host.  */
    TCG_OPF_NOT_PRESENT  = 0x20,
    /* Instruction operands are vectors.  */
    TCG_OPF_VECTOR       = 0x40,
    /* Instruction is a conditional branch. */
    TCG_OPF_COND_BRANCH  = 0x80
};

typedef struct TCGOpDef {
    const char *name;
    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
    uint8_t flags;
    TCGArgConstraint *args_ct;
} TCGOpDef;

extern TCGOpDef tcg_op_defs[];
extern const size_t tcg_op_defs_max;

typedef struct TCGTargetOpDef {
    TCGOpcode op;
    const char *args_ct_str[TCG_MAX_OP_ARGS];
} TCGTargetOpDef;

#define tcg_abort() \
do {\
    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
    abort();\
} while (0)

bool tcg_op_supported(TCGOpcode op);

void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);

TCGOp *tcg_emit_op(TCGOpcode opc);
void tcg_op_remove(TCGContext *s, TCGOp *op);
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc);
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc);

/**
 * tcg_remove_ops_after:
 * @op: target operation
 *
 * Discard any opcodes emitted since @op.  Expected usage is to save
 * a starting point with tcg_last_op(), speculatively emit opcodes,
 * then decide whether or not to keep those opcodes after the fact.
 */
void tcg_remove_ops_after(TCGOp *op);

void tcg_optimize(TCGContext *s);

/* Allocate a new temporary and initialize it with a constant. */
TCGv_i32 tcg_const_i32(int32_t val);
TCGv_i64 tcg_const_i64(int64_t val);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);
TCGv_vec tcg_const_zeros_vec(TCGType);
TCGv_vec tcg_const_ones_vec(TCGType);
TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);

/*
 * Locate or create a read-only temporary that is a constant.
 * This kind of temporary need not be freed, but for convenience
 * will be silently ignored by tcg_temp_free_*.
 */
TCGTemp *tcg_constant_internal(TCGType type, int64_t val);

static inline TCGv_i32 tcg_constant_i32(int32_t val)
{
    return temp_tcgv_i32(tcg_constant_internal(TCG_TYPE_I32, val));
}

static inline TCGv_i64 tcg_constant_i64(int64_t val)
{
    return temp_tcgv_i64(tcg_constant_internal(TCG_TYPE_I64, val));
}

TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val);
TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val);
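
/*
 * Usage sketch (illustration only; tcg_gen_add_i32 is declared in
 * "tcg/tcg-op.h"): unlike a tcg_const_i32() temporary, a tcg_constant_i32()
 * value needs no explicit free:
 *
 *     TCGv_i32 four = tcg_constant_i32(4);
 *     tcg_gen_add_i32(dst, src, four);   // no tcg_temp_free_i32() required
 */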

#if UINTPTR_MAX == UINT32_MAX
# define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
# define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x)))
#else
# define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i64((intptr_t)(x)))
# define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i64((intptr_t)(x)))
#endif

TCGLabel *gen_new_label(void);

/**
 * label_arg
 * @l: label
 *
 * Encode a label for storage in the TCG opcode stream.
 */

static inline TCGArg label_arg(TCGLabel *l)
{
    return (uintptr_t)l;
}

/**
 * arg_label
 * @i: value
 *
 * The opposite of label_arg.  Retrieve a label from the
 * encoding of the TCG opcode stream.
 */

static inline TCGLabel *arg_label(TCGArg i)
{
    return (TCGLabel *)(uintptr_t)i;
}
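
/*
 * Round-trip sketch (illustration only): label_arg() and arg_label() are
 * inverses, so a label survives a trip through the opcode stream:
 *
 *     TCGLabel *l = gen_new_label();
 *     tcg_debug_assert(arg_label(label_arg(l)) == l);
 */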

/**
 * tcg_ptr_byte_diff
 * @a, @b: addresses to be differenced
 *
 * There are many places within the TCG backends where we need a byte
 * difference between two pointers.  While this can be accomplished
 * with local casting, it's easy to get wrong -- especially if one is
 * concerned with the signedness of the result.
 *
 * This version relies on GCC's void pointer arithmetic to get the
 * correct result.
 */

static inline ptrdiff_t tcg_ptr_byte_diff(const void *a, const void *b)
{
    return a - b;
}

/**
 * tcg_pcrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a pc-relative difference, from the current code_ptr
 * to the destination address.
 */

static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, const void *target)
{
    return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_ptr));
}

/**
 * tcg_tbrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a difference, from the beginning of the current TB code
 * to the destination address.
 */
static inline ptrdiff_t tcg_tbrel_diff(TCGContext *s, const void *target)
{
    return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_buf));
}

/**
 * tcg_current_code_size
 * @s: the tcg context
 *
 * Compute the current code size within the translation block.
 * This is used to fill in qemu's data structures for goto_tb.
 */

static inline size_t tcg_current_code_size(TCGContext *s)
{
    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}

/**
 * tcg_qemu_tb_exec:
 * @env: pointer to CPUArchState for the CPU
 * @tb_ptr: address of generated code for the TB to execute
 *
 * Start executing code from a given translation block.
 * Where translation blocks have been linked, execution
 * may proceed from the given TB into successive ones.
 * Control eventually returns only when some action is needed
 * from the top-level loop: either control must pass to a TB
 * which has not yet been directly linked, or an asynchronous
 * event such as an interrupt needs handling.
 *
 * Return: The return value is the value passed to the corresponding
 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
 * The value is either zero or a 4-byte aligned pointer to that TB combined
 * with additional information in its two least significant bits. The
 * additional information is encoded as follows:
 *  0, 1: the link between this TB and the next is via the specified
 *        TB index (0 or 1). That is, we left the TB via (the equivalent
 *        of) "goto_tb <index>". The main loop uses this to determine
 *        how to link the TB just executed to the next.
 *  2:    we are using instruction counting code generation, and we
 *        did not start executing this TB because the instruction counter
 *        would hit zero midway through it. In this case the pointer
 *        returned is the TB we were about to execute, and the caller must
 *        arrange to execute the remaining count of instructions.
 *  3:    we stopped because the CPU's exit_request flag was set
 *        (usually meaning that there is an interrupt that needs to be
 *        handled). The pointer returned is the TB we were about to execute
 *        when we noticed the pending exit request.
 *
 * If the bottom two bits indicate an exit-via-index then the CPU
 * state is correctly synchronised and ready for execution of the next
 * TB (and in particular the guest PC is the address to execute next).
 * Otherwise, we gave up on execution of this TB before it started, and
 * the caller must fix up the CPU state by calling the CPU's
 * synchronize_from_tb() method with the TB pointer we return (falling
 * back to calling the CPU's set_pc method with tb->pc if no
 * synchronize_from_tb() method exists).
 *
 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
 * to this default (which just calls the prologue.code emitted by
 * tcg_target_qemu_prologue()).
 */
#define TB_EXIT_MASK      3
#define TB_EXIT_IDX0      0
#define TB_EXIT_IDX1      1
#define TB_EXIT_IDXMAX    1
#define TB_EXIT_REQUESTED 3

#ifdef CONFIG_TCG_INTERPRETER
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, const void *tb_ptr);
#else
typedef uintptr_t tcg_prologue_fn(CPUArchState *env, const void *tb_ptr);
extern tcg_prologue_fn *tcg_qemu_tb_exec;
#endif
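
/*
 * Decoding sketch (modelled on the main execution loop in
 * accel/tcg/cpu-exec.c; illustration only):
 *
 *     uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
 *     TranslationBlock *last_tb = (void *)(ret & ~TB_EXIT_MASK);
 *     int tb_exit = ret & TB_EXIT_MASK;
 *     if (tb_exit > TB_EXIT_IDXMAX) {
 *         // not an exit-via-index: CPU state must be fixed up from last_tb
 *     }
 */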

void tcg_register_jit(const void *buf, size_t buf_size);

#if TCG_TARGET_MAYBE_vec
/* Return zero if the tuple (opc, type, vece) is unsupportable;
   return > 0 if it is directly supportable;
   return < 0 if we must call tcg_expand_vec_op.  */
int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned);
#else
static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
{
    return 0;
}
#endif

/* Expand the tuple (opc, type, vece) on the given arguments.  */
void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);

/* Replicate a constant C according to the log2 of the element size.  */
uint64_t dup_const(unsigned vece, uint64_t c);

#define dup_const(VECE, C)                                         \
    (__builtin_constant_p(VECE)                                    \
     ? (  (VECE) == MO_8  ? 0x0101010101010101ull * (uint8_t)(C)   \
        : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C)  \
        : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C)  \
        : (VECE) == MO_64 ? (uint64_t)(C)                          \
        : (qemu_build_not_reached_always(), 0))                    \
     : dup_const(VECE, C))
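
/*
 * Worked example (illustration only): replicating the byte 0xab across a
 * 64-bit value at each element size gives
 *
 *     dup_const(MO_8,  0xab) == 0xababababababababull
 *     dup_const(MO_16, 0xab) == 0x00ab00ab00ab00abull
 *     dup_const(MO_32, 0xab) == 0x000000ab000000abull
 */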

#if TARGET_LONG_BITS == 64
# define dup_const_tl  dup_const
#else
# define dup_const_tl(VECE, C)                                     \
    (__builtin_constant_p(VECE)                                    \
     ? (  (VECE) == MO_8  ? 0x01010101ul * (uint8_t)(C)            \
        : (VECE) == MO_16 ? 0x00010001ul * (uint16_t)(C)           \
        : (VECE) == MO_32 ? 0x00000001ul * (uint32_t)(C)           \
        : (qemu_build_not_reached_always(), 0))                    \
     :  (target_long)dup_const(VECE, C))
#endif

/*
 * Memory helpers that will be used by TCG generated code.
 */
#ifdef CONFIG_SOFTMMU
/* Value zero-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           MemOpIdx oi, uintptr_t retaddr);

/* Value sign-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr);

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        MemOpIdx oi, uintptr_t retaddr);
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr);
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr);
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr);

/* Temporary aliases until backends are converted.  */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
# define helper_ret_lduw_mmu  helper_be_lduw_mmu
# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
# define helper_ret_ldul_mmu  helper_be_ldul_mmu
# define helper_ret_ldl_mmu   helper_be_ldul_mmu
# define helper_ret_ldq_mmu   helper_be_ldq_mmu
# define helper_ret_stw_mmu   helper_be_stw_mmu
# define helper_ret_stl_mmu   helper_be_stl_mmu
# define helper_ret_stq_mmu   helper_be_stq_mmu
#else
# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
# define helper_ret_lduw_mmu  helper_le_lduw_mmu
# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
# define helper_ret_ldul_mmu  helper_le_ldul_mmu
# define helper_ret_ldl_mmu   helper_le_ldul_mmu
# define helper_ret_ldq_mmu   helper_le_ldq_mmu
# define helper_ret_stw_mmu   helper_le_stw_mmu
# define helper_ret_stl_mmu   helper_le_stl_mmu
# define helper_ret_stq_mmu   helper_le_stq_mmu
#endif
#endif /* CONFIG_SOFTMMU */

uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
                                 uint32_t cmpv, uint32_t newv,
                                 MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
                                    uint64_t cmpv, uint64_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
                                    uint64_t cmpv, uint64_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);

#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu            \
    (CPUArchState *env, target_ulong addr, TYPE val,  \
     MemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif
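
/*
 * Expansion sketch (illustration only): GEN_ATOMIC_HELPER_ALL(fetch_add)
 * below declares, among others,
 *
 *     uint32_t cpu_atomic_fetch_addl_le_mmu(CPUArchState *env,
 *                                           target_ulong addr, uint32_t val,
 *                                           MemOpIdx oi, uintptr_t retaddr);
 */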

GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
GEN_ATOMIC_HELPER_ALL(fetch_smin)
GEN_ATOMIC_HELPER_ALL(fetch_umin)
GEN_ATOMIC_HELPER_ALL(fetch_smax)
GEN_ATOMIC_HELPER_ALL(fetch_umax)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
GEN_ATOMIC_HELPER_ALL(smin_fetch)
GEN_ATOMIC_HELPER_ALL(umin_fetch)
GEN_ATOMIC_HELPER_ALL(smax_fetch)
GEN_ATOMIC_HELPER_ALL(umax_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER

Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
                                  Int128 cmpv, Int128 newv,
                                  MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
                                  Int128 cmpv, Int128 newv,
                                  MemOpIdx oi, uintptr_t retaddr);

Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
                             MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
                             MemOpIdx oi, uintptr_t retaddr);
void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                           MemOpIdx oi, uintptr_t retaddr);
void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                           MemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_DEBUG_TCG
void tcg_assert_listed_vecop(TCGOpcode);
#else
static inline void tcg_assert_listed_vecop(TCGOpcode op) { }
#endif

static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n)
{
#ifdef CONFIG_DEBUG_TCG
    const TCGOpcode *o = tcg_ctx->vecop_list;
    tcg_ctx->vecop_list = n;
    return o;
#else
    return NULL;
#endif
}

bool tcg_can_emit_vecop_list(const TCGOpcode *, TCGType, unsigned);

#endif /* TCG_H */