translate.h (21151B)
#ifndef TARGET_ARM_TRANSLATE_H
#define TARGET_ARM_TRANSLATE_H

#include "exec/translator.h"
#include "internals.h"


/* internal defines */
typedef struct DisasContext {
    DisasContextBase base;
    const ARMISARegisters *isar;

    /* The address of the current instruction being translated. */
    target_ulong pc_curr;
    target_ulong page_start;
    uint32_t insn;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    TCGLabel *condlabel;
    /* Thumb-2 conditional execution bits. */
    int condexec_mask;
    int condexec_cond;
    /* M-profile ECI/ICI exception-continuable instruction state */
    int eci;
    /*
     * trans_ functions for insns which are continuable should set this true
     * after decode (ie after any UNDEF checks)
     */
    bool eci_handled;
    /* TCG op to rewind to if this turns out to be an invalid ECI state */
    TCGOp *insn_eci_rewind;
    int thumb;
    int sctlr_b;
    MemOp be_data;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
    uint8_t tbii;      /* TBI1|TBI0 for insns */
    uint8_t tbid;      /* TBI1|TBI0 for data */
    uint8_t tcma;      /* TCMA1|TCMA0 for MTE */
    bool ns;           /* Use non-secure CPREG bank on access */
    int fp_excp_el;    /* FP exception EL or 0 if enabled */
    int sve_excp_el;   /* SVE exception EL or 0 if enabled */
    int sve_len;       /* SVE vector length in bytes */
    /* Flag indicating that exceptions from secure mode are routed to EL3. */
    bool secure_routed_to_el3;
    bool vfp_enabled; /* FP enabled via FPSCR.EN */
    int vec_len;
    int vec_stride;
    bool v7m_handler_mode;
    bool v8m_secure; /* true if v8M and we're in Secure mode */
    bool v8m_stackcheck; /* true if we need to perform v8M stack limit checks */
    bool v8m_fpccr_s_wrong; /* true if v8M FPCCR.S != v8m_secure */
    bool v7m_new_fp_ctxt_needed; /* ASPEN set but no active FP context */
    bool v7m_lspact; /* FPCCR.LSPACT set */
    /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
     * so that top level loop can generate correct syndrome information.
     */
    uint32_t svc_imm;
    int aarch64;
    int current_el;
    /* Debug target exception level for single-step exceptions */
    int debug_target_el;
    GHashTable *cp_regs;
    uint64_t features; /* CPU features bits */
    /* Because unallocated encodings generate different exception syndrome
     * information from traps due to FP being disabled, we can't do a single
     * "is fp access disabled" check at a high level in the decode tree.
     * To help in catching bugs where the access check was forgotten in some
     * code path, we set this flag when the access check is done, and assert
     * that it is set at the point where we actually touch the FP regs.
     */
    bool fp_access_checked;
    bool sve_access_checked;
    /* ARMv8 single-step state (this is distinct from the QEMU gdbstub
     * single-step support).
     */
    bool ss_active;
    bool pstate_ss;
    /* True if the insn just emitted was a load-exclusive instruction
     * (necessary for syndrome information for single step exceptions),
     * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
     */
    bool is_ldex;
    /* True if AccType_UNPRIV should be used for LDTR et al */
    bool unpriv;
    /* True if v8.3-PAuth is active. */
    bool pauth_active;
    /* True if v8.5-MTE access to tags is enabled. */
    bool ata;
    /* True if v8.5-MTE tag checks affect the PE; index with is_unpriv. */
    bool mte_active[2];
    /* True with v8.5-BTI and SCTLR_ELx.BT* set. */
    bool bt;
    /* True if any CP15 access is trapped by HSTR_EL2 */
    bool hstr_active;
    /* True if memory operations require alignment */
    bool align_mem;
    /* True if PSTATE.IL is set */
    bool pstate_il;
    /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */
    bool mve_no_pred;
    /*
     * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
     *  < 0, set by the current instruction.
     */
    int8_t btype;
    /* A copy of cpu->dcz_blocksize. */
    uint8_t dcz_blocksize;
    /* True if this page is guarded. */
    bool guarded_page;
    /* Bottom two bits of XScale c15_cpar coprocessor access control reg */
    int c15_cpar;
    /* TCG op of the current insn_start. */
    TCGOp *insn_start;
#define TMP_A64_MAX 16
    int tmp_a64_count;
    TCGv_i64 tmp_a64[TMP_A64_MAX];
} DisasContext;

typedef struct DisasCompare {
    TCGCond cond;
    TCGv_i32 value;
    bool value_global;
} DisasCompare;

/* Share the TCG temporaries common between 32 and 64 bit modes. */
extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
extern TCGv_i64 cpu_exclusive_addr;
extern TCGv_i64 cpu_exclusive_val;

/*
 * Constant expanders for the decoders.
 */

static inline int negate(DisasContext *s, int x)
{
    return -x;
}

static inline int plus_1(DisasContext *s, int x)
{
    return x + 1;
}

static inline int plus_2(DisasContext *s, int x)
{
    return x + 2;
}

static inline int times_2(DisasContext *s, int x)
{
    return x * 2;
}

static inline int times_4(DisasContext *s, int x)
{
    return x * 4;
}

static inline int times_2_plus_1(DisasContext *s, int x)
{
    return x * 2 + 1;
}

static inline int rsub_64(DisasContext *s, int x)
{
    return 64 - x;
}

static inline int rsub_32(DisasContext *s, int x)
{
    return 32 - x;
}

static inline int rsub_16(DisasContext *s, int x)
{
    return 16 - x;
}

static inline int rsub_8(DisasContext *s, int x)
{
    return 8 - x;
}

static inline int neon_3same_fp_size(DisasContext *s, int x)
{
    /* Convert 0==fp32, 1==fp16 into a MO_* value */
    return MO_32 - x;
}

static inline int arm_dc_feature(DisasContext *dc, int feature)
{
    return (dc->features & (1ULL << feature)) != 0;
}

static inline int get_mem_index(DisasContext *s)
{
    return arm_to_core_mmu_idx(s->mmu_idx);
}

/* Function used to determine the target exception EL when otherwise not known
 * or default.
 */
static inline int default_exception_el(DisasContext *s)
{
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3. Otherwise,
     * exceptions can only be routed to ELs above 1, so we target the higher of
     * 1 or the current EL.
     */
    return (s->mmu_idx == ARMMMUIdx_SE10_0 && s->secure_routed_to_el3)
            ? 3 : MAX(1, s->current_el);
}

static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
{
    /* We don't need to save all of the syndrome so we mask and shift
     * out unneeded bits to help the sleb128 encoder do a better job.
     */
    syn &= ARM_INSN_START_WORD2_MASK;
    syn >>= ARM_INSN_START_WORD2_SHIFT;

    /* We check and clear insn_start_idx to catch multiple updates. */
    assert(s->insn_start != NULL);
    tcg_set_insn_start_param(s->insn_start, 2, syn);
    s->insn_start = NULL;
}
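
/*
 * Illustrative sketch, not part of the original header: a decoder that
 * rejects an unallocated encoding typically raises EXCP_UDEF against the
 * EL chosen by default_exception_el(), roughly as unallocated_encoding()
 * does in translate.c:
 *
 *     gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
 *                        default_exception_el(s));
 *
 * gen_exception_insn() is declared further down in this header;
 * syn_uncategorized() is one of the syn_* syndrome constructors pulled in
 * via "internals.h".
 */
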
/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
/* CPU state was modified dynamically; exit to main loop for interrupts. */
#define DISAS_UPDATE_EXIT  DISAS_TARGET_1
/* These instructions trap after executing, so the A32/T32 decoder must
 * defer them until after the conditional execution state has been updated.
 * WFI also needs special handling when single-stepping.
 */
#define DISAS_WFI       DISAS_TARGET_2
#define DISAS_SWI       DISAS_TARGET_3
/* WFE */
#define DISAS_WFE       DISAS_TARGET_4
#define DISAS_HVC       DISAS_TARGET_5
#define DISAS_SMC       DISAS_TARGET_6
#define DISAS_YIELD     DISAS_TARGET_7
/* M profile branch which might be an exception return (and so needs
 * custom end-of-TB code)
 */
#define DISAS_BX_EXCRET DISAS_TARGET_8
/*
 * For instructions which want an immediate exit to the main loop, as opposed
 * to attempting to use lookup_and_goto_ptr. Unlike DISAS_UPDATE_EXIT, this
 * doesn't write the PC on exiting the translation loop so you need to ensure
 * something (gen_a64_set_pc_im or runtime helper) has done so before we reach
 * return from cpu_tb_exec.
 */
#define DISAS_EXIT      DISAS_TARGET_9
/* CPU state was modified dynamically; no need to exit, but do not chain. */
#define DISAS_UPDATE_NOCHAIN  DISAS_TARGET_10

#ifdef TARGET_AARCH64
void a64_translate_init(void);
void gen_a64_set_pc_im(uint64_t val);
extern const TranslatorOps aarch64_translator_ops;
#else
static inline void a64_translate_init(void)
{
}

static inline void gen_a64_set_pc_im(uint64_t val)
{
}
#endif

void arm_test_cc(DisasCompare *cmp, int cc);
void arm_free_cc(DisasCompare *cmp);
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
void arm_gen_test_cc(int cc, TCGLabel *label);
MemOp pow2_align(unsigned i);
void unallocated_encoding(DisasContext *s);
void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
                        uint32_t syn, uint32_t target_el);

/* Return state of Alternate Half-precision flag, caller frees result */
static inline TCGv_i32 get_ahp_flag(void)
{
    TCGv_i32 ret = tcg_temp_new_i32();

    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPSCR]));
    tcg_gen_extract_i32(ret, ret, 26, 1);

    return ret;
}

/* Set bits within PSTATE. */
static inline void set_pstate_bits(uint32_t bits)
{
    TCGv_i32 p = tcg_temp_new_i32();

    tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));

    tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
    tcg_gen_ori_i32(p, p, bits);
    tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
    tcg_temp_free_i32(p);
}

/* Clear bits within PSTATE. */
static inline void clear_pstate_bits(uint32_t bits)
{
    TCGv_i32 p = tcg_temp_new_i32();

    tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));

    tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
    tcg_gen_andi_i32(p, p, ~bits);
    tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
    tcg_temp_free_i32(p);
}
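
/*
 * Illustrative sketch, not part of the original header: these two helpers
 * are only usable for PSTATE bits that are not cached elsewhere in
 * CPUARMState (the tcg_debug_assert() above enforces this).  An
 * MSR-immediate style update of an uncached bit such as PSTATE.UAO can be
 * written roughly as:
 *
 *     if (set) {
 *         set_pstate_bits(PSTATE_UAO);
 *     } else {
 *         clear_pstate_bits(PSTATE_UAO);
 *     }
 *
 * with the caller then rebuilding the cached hflags and ending the TB
 * (e.g. via DISAS_UPDATE_EXIT or DISAS_UPDATE_NOCHAIN), since translation
 * state may have changed.  PSTATE_UAO is assumed to come from cpu.h.
 */
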
/* If the singlestep state is Active-not-pending, advance to Active-pending. */
static inline void gen_ss_advance(DisasContext *s)
{
    if (s->ss_active) {
        s->pstate_ss = 0;
        clear_pstate_bits(PSTATE_SS);
    }
}

static inline void gen_exception(int excp, uint32_t syndrome,
                                 uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

/* Generate an architectural singlestep exception */
static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
{
    bool same_el = (s->debug_target_el == s->current_el);

    /*
     * If singlestep is targeting a lower EL than the current one,
     * then s->ss_active must be false and we can never get here.
     */
    assert(s->debug_target_el >= s->current_el);

    gen_exception(EXCP_UDEF, syn_swstep(same_el, isv, ex), s->debug_target_el);
}

/*
 * Given a VFP floating point constant encoded into an 8 bit immediate in an
 * instruction, expand it to the actual constant value of the specified
 * size, as per the VFPExpandImm() pseudocode in the Arm ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8);

/* Vector operations shared between ARM and AArch64. */
void gen_gvec_ceq0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_clt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cgt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cle0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cge0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_mla(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_ushl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_sshl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_ushl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);

void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);
max_sz); 409void gen_gvec_usra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, 410 int64_t shift, uint32_t opr_sz, uint32_t max_sz); 411 412void gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, 413 int64_t shift, uint32_t opr_sz, uint32_t max_sz); 414void gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, 415 int64_t shift, uint32_t opr_sz, uint32_t max_sz); 416void gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, 417 int64_t shift, uint32_t opr_sz, uint32_t max_sz); 418void gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, 419 int64_t shift, uint32_t opr_sz, uint32_t max_sz); 420 421void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, 422 int64_t shift, uint32_t opr_sz, uint32_t max_sz); 423void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, 424 int64_t shift, uint32_t opr_sz, uint32_t max_sz); 425 426void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, 427 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz); 428void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, 429 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz); 430 431void gen_gvec_sabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, 432 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz); 433void gen_gvec_uabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, 434 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz); 435 436void gen_gvec_saba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, 437 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz); 438void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, 439 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz); 440 441/* 442 * Forward to the isar_feature_* tests given a DisasContext pointer. 443 */ 444#define dc_isar_feature(name, ctx) \ 445 ({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); }) 446 447/* Note that the gvec expanders operate on offsets + sizes. 
/* Note that the gvec expanders operate on offsets + sizes. */
typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);
typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);
typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);

/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenOneOpFn(TCGv_i32, TCGv_i32);
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoOpWidenFn(TCGv_i64, TCGv_i32, TCGv_i32);
typedef void NeonGenOneSingleOpFn(TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoSingleOpFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOne64OpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
typedef void ShiftFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);

/**
 * arm_tbflags_from_tb:
 * @tb: the TranslationBlock
 *
 * Extract the flag values from @tb.
 */
static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb)
{
    return (CPUARMTBFlags){ tb->flags, tb->cs_base };
}

/*
 * Enum for argument to fpstatus_ptr().
 */
typedef enum ARMFPStatusFlavour {
    FPST_FPCR,
    FPST_FPCR_F16,
    FPST_STD,
    FPST_STD_F16,
} ARMFPStatusFlavour;

/**
 * fpstatus_ptr: return TCGv_ptr to the specified fp_status field
 *
 * We have multiple softfloat float_status fields in the Arm CPU state struct
 * (see the comment in cpu.h for details). Return a TCGv_ptr which has
 * been set up to point to the requested field in the CPU state struct.
 * The options are:
 *
 * FPST_FPCR
 *   for non-FP16 operations controlled by the FPCR
 * FPST_FPCR_F16
 *   for operations controlled by the FPCR where FPCR.FZ16 is to be used
 * FPST_STD
 *   for A32/T32 Neon operations using the "standard FPSCR value"
 * FPST_STD_F16
 *   as FPST_STD, but where FPCR.FZ16 is to be used
 */
static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    switch (flavour) {
    case FPST_FPCR:
        offset = offsetof(CPUARMState, vfp.fp_status);
        break;
    case FPST_FPCR_F16:
        offset = offsetof(CPUARMState, vfp.fp_status_f16);
        break;
    case FPST_STD:
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
        break;
    case FPST_STD_F16:
        offset = offsetof(CPUARMState, vfp.standard_fp_status_f16);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
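
/*
 * Illustrative sketch, not part of the original header: a caller usually
 * picks the flavour from the element size, hands the pointer to a
 * softfloat-backed helper, and frees the temporary, e.g.
 *
 *     TCGv_ptr fpst = fpstatus_ptr(sz == MO_16 ? FPST_STD_F16 : FPST_STD);
 *     gen_helper_vfp_adds(rd, rn, rm, fpst);
 *     tcg_temp_free_ptr(fpst);
 *
 * gen_helper_vfp_adds() is only a stand-in for whichever helper the insn
 * needs; the point is that the returned pointer selects which fp_status
 * field (and hence which FPCR/FPSCR context) the helper observes.
 */
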
/**
 * finalize_memop:
 * @s: DisasContext
 * @opc: size+sign+align of the memory operation
 *
 * Build the complete MemOp for a memory operation, including alignment
 * and endianness.
 *
 * If (op & MO_AMASK) then the operation already contains the required
 * alignment, e.g. for AccType_ATOMIC.  Otherwise, this is an optionally
 * unaligned operation, e.g. for AccType_NORMAL.
 *
 * In the latter case, there are configuration bits that require alignment,
 * and this is applied here.  Note that there is no way to indicate that
 * no alignment should ever be enforced; this must be handled manually.
 */
static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
{
    if (s->align_mem && !(opc & MO_AMASK)) {
        opc |= MO_ALIGN;
    }
    return opc | s->be_data;
}

/**
 * asimd_imm_const: Expand an encoded SIMD constant value
 *
 * Expand a SIMD constant value. This is essentially the pseudocode
 * AdvSIMDExpandImm, except that we also perform the boolean NOT needed for
 * VMVN and VBIC (when cmode < 14 && op == 1).
 *
 * The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
 * callers must catch this; we return the 64-bit constant value defined
 * for AArch64.
 *
 * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
 * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
 * we produce an immediate constant value of 0 in these cases.
 */
uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);

#endif /* TARGET_ARM_TRANSLATE_H */