vmx-impl.c.inc (69637B)
/*
 * translate/vmx-impl.c
 *
 * Altivec/VMX translation
 */

/*** Altivec vector extension ***/
/* Altivec register moves */

static inline TCGv_ptr gen_avr_ptr(int reg)
{
    TCGv_ptr r = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(r, cpu_env, avr_full_offset(reg));
    return r;
}
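/*
 * Note: the Altivec registers share storage with the VSX register file;
 * avr_full_offset() resolves VR n to the offset of vsr[32 + n] inside
 * CPUPPCState (see the direct vsr[32 + VB] accesses in trans_vclzw below),
 * so helpers can be handed a pointer straight into the register file.
 */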
#define GEN_VR_LDX(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv EA; \
    TCGv_i64 avr; \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_INT); \
    avr = tcg_temp_new_i64(); \
    EA = tcg_temp_new(); \
    gen_addr_reg_index(ctx, EA); \
    tcg_gen_andi_tl(EA, EA, ~0xf); \
    /* \
     * We only need to swap high and low halves. gen_qemu_ld64_i64 \
     * does the necessary 64-bit byteswap already. \
     */ \
    if (ctx->le_mode) { \
        gen_qemu_ld64_i64(ctx, avr, EA); \
        set_avr64(rD(ctx->opcode), avr, false); \
        tcg_gen_addi_tl(EA, EA, 8); \
        gen_qemu_ld64_i64(ctx, avr, EA); \
        set_avr64(rD(ctx->opcode), avr, true); \
    } else { \
        gen_qemu_ld64_i64(ctx, avr, EA); \
        set_avr64(rD(ctx->opcode), avr, true); \
        tcg_gen_addi_tl(EA, EA, 8); \
        gen_qemu_ld64_i64(ctx, avr, EA); \
        set_avr64(rD(ctx->opcode), avr, false); \
    } \
    tcg_temp_free(EA); \
    tcg_temp_free_i64(avr); \
}

#define GEN_VR_STX(name, opc2, opc3) \
static void gen_st##name(DisasContext *ctx) \
{ \
    TCGv EA; \
    TCGv_i64 avr; \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_INT); \
    avr = tcg_temp_new_i64(); \
    EA = tcg_temp_new(); \
    gen_addr_reg_index(ctx, EA); \
    tcg_gen_andi_tl(EA, EA, ~0xf); \
    /* \
     * We only need to swap high and low halves. gen_qemu_st64_i64 \
     * does the necessary 64-bit byteswap already. \
     */ \
    if (ctx->le_mode) { \
        get_avr64(avr, rD(ctx->opcode), false); \
        gen_qemu_st64_i64(ctx, avr, EA); \
        tcg_gen_addi_tl(EA, EA, 8); \
        get_avr64(avr, rD(ctx->opcode), true); \
        gen_qemu_st64_i64(ctx, avr, EA); \
    } else { \
        get_avr64(avr, rD(ctx->opcode), true); \
        gen_qemu_st64_i64(ctx, avr, EA); \
        tcg_gen_addi_tl(EA, EA, 8); \
        get_avr64(avr, rD(ctx->opcode), false); \
        gen_qemu_st64_i64(ctx, avr, EA); \
    } \
    tcg_temp_free(EA); \
    tcg_temp_free_i64(avr); \
}

#define GEN_VR_LVE(name, opc2, opc3, size) \
static void gen_lve##name(DisasContext *ctx) \
    { \
        TCGv EA; \
        TCGv_ptr rs; \
        if (unlikely(!ctx->altivec_enabled)) { \
            gen_exception(ctx, POWERPC_EXCP_VPU); \
            return; \
        } \
        gen_set_access_type(ctx, ACCESS_INT); \
        EA = tcg_temp_new(); \
        gen_addr_reg_index(ctx, EA); \
        if (size > 1) { \
            tcg_gen_andi_tl(EA, EA, ~(size - 1)); \
        } \
        rs = gen_avr_ptr(rS(ctx->opcode)); \
        gen_helper_lve##name(cpu_env, rs, EA); \
        tcg_temp_free(EA); \
        tcg_temp_free_ptr(rs); \
    }

#define GEN_VR_STVE(name, opc2, opc3, size) \
static void gen_stve##name(DisasContext *ctx) \
    { \
        TCGv EA; \
        TCGv_ptr rs; \
        if (unlikely(!ctx->altivec_enabled)) { \
            gen_exception(ctx, POWERPC_EXCP_VPU); \
            return; \
        } \
        gen_set_access_type(ctx, ACCESS_INT); \
        EA = tcg_temp_new(); \
        gen_addr_reg_index(ctx, EA); \
        if (size > 1) { \
            tcg_gen_andi_tl(EA, EA, ~(size - 1)); \
        } \
        rs = gen_avr_ptr(rS(ctx->opcode)); \
        gen_helper_stve##name(cpu_env, rs, EA); \
        tcg_temp_free(EA); \
        tcg_temp_free_ptr(rs); \
    }

GEN_VR_LDX(lvx, 0x07, 0x03);
/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
GEN_VR_LDX(lvxl, 0x07, 0x0B);

GEN_VR_LVE(bx, 0x07, 0x00, 1);
GEN_VR_LVE(hx, 0x07, 0x01, 2);
GEN_VR_LVE(wx, 0x07, 0x02, 4);

GEN_VR_STX(svx, 0x07, 0x07);
/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
GEN_VR_STX(svxl, 0x07, 0x0F);

GEN_VR_STVE(bx, 0x07, 0x04, 1);
GEN_VR_STVE(hx, 0x07, 0x05, 2);
GEN_VR_STVE(wx, 0x07, 0x06, 4);

static void gen_mfvscr(DisasContext *ctx)
{
    TCGv_i32 t;
    TCGv_i64 avr;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    avr = tcg_temp_new_i64();
    tcg_gen_movi_i64(avr, 0);
    set_avr64(rD(ctx->opcode), avr, true);
    t = tcg_temp_new_i32();
    gen_helper_mfvscr(t, cpu_env);
    tcg_gen_extu_i32_i64(avr, t);
    set_avr64(rD(ctx->opcode), avr, false);
    tcg_temp_free_i32(t);
    tcg_temp_free_i64(avr);
}

static void gen_mtvscr(DisasContext *ctx)
{
    TCGv_i32 val;
    int bofs;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    val = tcg_temp_new_i32();
    bofs = avr_full_offset(rB(ctx->opcode));
#ifdef HOST_WORDS_BIGENDIAN
    bofs += 3 * 4;
#endif

    tcg_gen_ld_i32(val, cpu_env, bofs);
    gen_helper_mtvscr(cpu_env, val);
    tcg_temp_free_i32(val);
}
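/*
 * A worked sketch of the vmul10* decomposition used below: the 128-bit
 * multiply by 10 is split into two 64-bit multiplies,
 *
 *     lo * 10 -> full 128-bit product (t1:t0)
 *     hi * 10 -> low 64 bits only (anything above cannot land in vD)
 *
 * with the high half of the low product (kept in t2) added into the high
 * doubleword as a carry.  The add_cin variants additionally add the low
 * nibble of vB into the low doubleword, and the ret_carry variants return
 * the carry out of the 128-bit product (in the low doubleword of vD, with
 * the high doubleword zeroed) instead of the product itself.
 */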
#define GEN_VX_VMUL10(name, add_cin, ret_carry) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    TCGv_i64 t2; \
    TCGv_i64 avr; \
    TCGv_i64 ten, z; \
\
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
\
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    t2 = tcg_temp_new_i64(); \
    avr = tcg_temp_new_i64(); \
    ten = tcg_const_i64(10); \
    z = tcg_const_i64(0); \
\
    if (add_cin) { \
        get_avr64(avr, rA(ctx->opcode), false); \
        tcg_gen_mulu2_i64(t0, t1, avr, ten); \
        get_avr64(avr, rB(ctx->opcode), false); \
        tcg_gen_andi_i64(t2, avr, 0xF); \
        tcg_gen_add2_i64(avr, t2, t0, t1, t2, z); \
        set_avr64(rD(ctx->opcode), avr, false); \
    } else { \
        get_avr64(avr, rA(ctx->opcode), false); \
        tcg_gen_mulu2_i64(avr, t2, avr, ten); \
        set_avr64(rD(ctx->opcode), avr, false); \
    } \
\
    if (ret_carry) { \
        get_avr64(avr, rA(ctx->opcode), true); \
        tcg_gen_mulu2_i64(t0, t1, avr, ten); \
        tcg_gen_add2_i64(t0, avr, t0, t1, t2, z); \
        set_avr64(rD(ctx->opcode), avr, false); \
        set_avr64(rD(ctx->opcode), z, true); \
    } else { \
        get_avr64(avr, rA(ctx->opcode), true); \
        tcg_gen_mul_i64(t0, avr, ten); \
        tcg_gen_add_i64(avr, t0, t2); \
        set_avr64(rD(ctx->opcode), avr, true); \
    } \
\
    tcg_temp_free_i64(t0); \
    tcg_temp_free_i64(t1); \
    tcg_temp_free_i64(t2); \
    tcg_temp_free_i64(avr); \
    tcg_temp_free_i64(ten); \
    tcg_temp_free_i64(z); \
}

GEN_VX_VMUL10(vmul10uq, 0, 0);
GEN_VX_VMUL10(vmul10euq, 1, 0);
GEN_VX_VMUL10(vmul10cuq, 0, 1);
GEN_VX_VMUL10(vmul10ecuq, 1, 1);

#define GEN_VXFORM_V(name, vece, tcg_op, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
\
    tcg_op(vece, \
           avr_full_offset(rD(ctx->opcode)), \
           avr_full_offset(rA(ctx->opcode)), \
           avr_full_offset(rB(ctx->opcode)), \
           16, 16); \
}
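/*
 * The tcg_op parameter of GEN_VXFORM_V is a tcg_gen_gvec_* expander; the
 * trailing (16, 16) pair is oprsz/maxsz, i.e. the operation is expanded
 * element-wise over the full 16-byte vector at the given CPUPPCState
 * offsets.
 */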
/* Logical operations */
GEN_VXFORM_V(vand, MO_64, tcg_gen_gvec_and, 2, 16);
GEN_VXFORM_V(vandc, MO_64, tcg_gen_gvec_andc, 2, 17);
GEN_VXFORM_V(vor, MO_64, tcg_gen_gvec_or, 2, 18);
GEN_VXFORM_V(vxor, MO_64, tcg_gen_gvec_xor, 2, 19);
GEN_VXFORM_V(vnor, MO_64, tcg_gen_gvec_nor, 2, 20);
GEN_VXFORM_V(veqv, MO_64, tcg_gen_gvec_eqv, 2, 26);
GEN_VXFORM_V(vnand, MO_64, tcg_gen_gvec_nand, 2, 22);
GEN_VXFORM_V(vorc, MO_64, tcg_gen_gvec_orc, 2, 21);

#define GEN_VXFORM(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv_ptr ra, rb, rd; \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    ra = gen_avr_ptr(rA(ctx->opcode)); \
    rb = gen_avr_ptr(rB(ctx->opcode)); \
    rd = gen_avr_ptr(rD(ctx->opcode)); \
    gen_helper_##name(rd, ra, rb); \
    tcg_temp_free_ptr(ra); \
    tcg_temp_free_ptr(rb); \
    tcg_temp_free_ptr(rd); \
}

#define GEN_VXFORM_TRANS(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    trans_##name(ctx); \
}

#define GEN_VXFORM_ENV(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv_ptr ra, rb, rd; \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    ra = gen_avr_ptr(rA(ctx->opcode)); \
    rb = gen_avr_ptr(rB(ctx->opcode)); \
    rd = gen_avr_ptr(rD(ctx->opcode)); \
    gen_helper_##name(cpu_env, rd, ra, rb); \
    tcg_temp_free_ptr(ra); \
    tcg_temp_free_ptr(rb); \
    tcg_temp_free_ptr(rd); \
}

#define GEN_VXFORM3(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv_ptr ra, rb, rc, rd; \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    ra = gen_avr_ptr(rA(ctx->opcode)); \
    rb = gen_avr_ptr(rB(ctx->opcode)); \
    rc = gen_avr_ptr(rC(ctx->opcode)); \
    rd = gen_avr_ptr(rD(ctx->opcode)); \
    gen_helper_##name(rd, ra, rb, rc); \
    tcg_temp_free_ptr(ra); \
    tcg_temp_free_ptr(rb); \
    tcg_temp_free_ptr(rc); \
    tcg_temp_free_ptr(rd); \
}

/*
 * Support for Altivec instruction pairs that use bit 31 (Rc) as
 * an opcode bit. In general, these pairs come from different
 * versions of the ISA, so we must also support a pair of flags for
 * each instruction.
 */
#define GEN_VXFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \
static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
{ \
    if ((Rc(ctx->opcode) == 0) && \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
        gen_##name0(ctx); \
    } else if ((Rc(ctx->opcode) == 1) && \
               ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
        gen_##name1(ctx); \
    } else { \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
    } \
}
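/*
 * For example, GEN_VXFORM_DUAL(vadduhm, ..., vmul10ecuq, ...) below emits a
 * single decode entry: with Rc (bit 31) clear the word decodes as vadduhm
 * (base Altivec), with Rc set it decodes as vmul10ecuq (ISA v3.00), and a
 * mismatch against the CPU's instruction flags raises the invalid-opcode
 * exception.
 */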
/*
 * We use this macro if one instruction is realized with direct
 * translation, and the second one with a helper.
 */
#define GEN_VXFORM_TRANS_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)\
static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
{ \
    if ((Rc(ctx->opcode) == 0) && \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
        if (unlikely(!ctx->altivec_enabled)) { \
            gen_exception(ctx, POWERPC_EXCP_VPU); \
            return; \
        } \
        trans_##name0(ctx); \
    } else if ((Rc(ctx->opcode) == 1) && \
               ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
        gen_##name1(ctx); \
    } else { \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
    } \
}

/* Adds support to provide invalid mask */
#define GEN_VXFORM_DUAL_EXT(name0, flg0, flg2_0, inval0, \
                            name1, flg1, flg2_1, inval1) \
static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
{ \
    if ((Rc(ctx->opcode) == 0) && \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0)) && \
        !(ctx->opcode & inval0)) { \
        gen_##name0(ctx); \
    } else if ((Rc(ctx->opcode) == 1) && \
               ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1)) && \
               !(ctx->opcode & inval1)) { \
        gen_##name1(ctx); \
    } else { \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
    } \
}

#define GEN_VXFORM_HETRO(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv_ptr rb; \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    rb = gen_avr_ptr(rB(ctx->opcode)); \
    gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], rb); \
    tcg_temp_free_ptr(rb); \
}

GEN_VXFORM_V(vaddubm, MO_8, tcg_gen_gvec_add, 0, 0);
GEN_VXFORM_DUAL_EXT(vaddubm, PPC_ALTIVEC, PPC_NONE, 0, \
                    vmul10cuq, PPC_NONE, PPC2_ISA300, 0x0000F800)
GEN_VXFORM_V(vadduhm, MO_16, tcg_gen_gvec_add, 0, 1);
GEN_VXFORM_DUAL(vadduhm, PPC_ALTIVEC, PPC_NONE, \
                vmul10ecuq, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_V(vadduwm, MO_32, tcg_gen_gvec_add, 0, 2);
GEN_VXFORM_V(vaddudm, MO_64, tcg_gen_gvec_add, 0, 3);
GEN_VXFORM_V(vsububm, MO_8, tcg_gen_gvec_sub, 0, 16);
GEN_VXFORM_V(vsubuhm, MO_16, tcg_gen_gvec_sub, 0, 17);
GEN_VXFORM_V(vsubuwm, MO_32, tcg_gen_gvec_sub, 0, 18);
GEN_VXFORM_V(vsubudm, MO_64, tcg_gen_gvec_sub, 0, 19);
GEN_VXFORM_V(vmaxub, MO_8, tcg_gen_gvec_umax, 1, 0);
GEN_VXFORM_V(vmaxuh, MO_16, tcg_gen_gvec_umax, 1, 1);
GEN_VXFORM_V(vmaxuw, MO_32, tcg_gen_gvec_umax, 1, 2);
GEN_VXFORM_V(vmaxud, MO_64, tcg_gen_gvec_umax, 1, 3);
GEN_VXFORM_V(vmaxsb, MO_8, tcg_gen_gvec_smax, 1, 4);
GEN_VXFORM_V(vmaxsh, MO_16, tcg_gen_gvec_smax, 1, 5);
GEN_VXFORM_V(vmaxsw, MO_32, tcg_gen_gvec_smax, 1, 6);
GEN_VXFORM_V(vmaxsd, MO_64, tcg_gen_gvec_smax, 1, 7);
GEN_VXFORM_V(vminub, MO_8, tcg_gen_gvec_umin, 1, 8);
GEN_VXFORM_V(vminuh, MO_16, tcg_gen_gvec_umin, 1, 9);
GEN_VXFORM_V(vminuw, MO_32, tcg_gen_gvec_umin, 1, 10);
GEN_VXFORM_V(vminud, MO_64, tcg_gen_gvec_umin, 1, 11);
GEN_VXFORM_V(vminsb, MO_8, tcg_gen_gvec_smin, 1, 12);
GEN_VXFORM_V(vminsh, MO_16, tcg_gen_gvec_smin, 1, 13);
GEN_VXFORM_V(vminsw, MO_32, tcg_gen_gvec_smin, 1, 14);
GEN_VXFORM_V(vminsd, MO_64, tcg_gen_gvec_smin, 1, 15);
GEN_VXFORM(vavgub, 1, 16);
GEN_VXFORM(vabsdub, 1, 16);
GEN_VXFORM_DUAL(vavgub, PPC_ALTIVEC, PPC_NONE, \
                vabsdub, PPC_NONE, PPC2_ISA300)
GEN_VXFORM(vavguh, 1, 17);
GEN_VXFORM(vabsduh, 1, 17);
GEN_VXFORM_DUAL(vavguh, PPC_ALTIVEC, PPC_NONE, \
                vabsduh, PPC_NONE, PPC2_ISA300)
GEN_VXFORM(vavguw, 1, 18);
GEN_VXFORM(vabsduw, 1, 18);
GEN_VXFORM_DUAL(vavguw, PPC_ALTIVEC, PPC_NONE, \
                vabsduw, PPC_NONE, PPC2_ISA300)
GEN_VXFORM(vavgsb, 1, 20);
GEN_VXFORM(vavgsh, 1, 21);
GEN_VXFORM(vavgsw, 1, 22);
GEN_VXFORM(vmrghb, 6, 0);
GEN_VXFORM(vmrghh, 6, 1);
GEN_VXFORM(vmrghw, 6, 2);
GEN_VXFORM(vmrglb, 6, 4);
GEN_VXFORM(vmrglh, 6, 5);
GEN_VXFORM(vmrglw, 6, 6);

static void trans_vmrgew(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 avr = tcg_temp_new_i64();

    get_avr64(avr, VB, true);
    tcg_gen_shri_i64(tmp, avr, 32);
    get_avr64(avr, VA, true);
    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
    set_avr64(VT, avr, true);

    get_avr64(avr, VB, false);
    tcg_gen_shri_i64(tmp, avr, 32);
    get_avr64(avr, VA, false);
    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(avr);
}

static void trans_vmrgow(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 avr = tcg_temp_new_i64();

    get_avr64(t0, VB, true);
    get_avr64(t1, VA, true);
    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
    set_avr64(VT, avr, true);

    get_avr64(t0, VB, false);
    get_avr64(t1, VA, false);
    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(avr);
}
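/*
 * Sketch of the merges above: vmrgew interleaves the even (high) words of
 * vA and vB, vmrgow the odd (low) words.  For vmrgew, each result
 * doubleword deposits the high word of vB's doubleword into bits 0:31 of
 * vA's doubleword, so the result vector is { A.w0, B.w0, A.w2, B.w2 } in
 * big-endian word numbering; vmrgow yields { A.w1, B.w1, A.w3, B.w3 }.
 */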
/*
 * lvsl VRT,RA,RB - Load Vector for Shift Left
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28:31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes sh:sh+15 of X are placed into vD.
 */
static void trans_lvsl(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    TCGv_i64 result = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv EA = tcg_temp_new();

    /* Get sh (from the description) by ANDing EA with 0xf. */
    gen_addr_reg_index(ctx, EA);
    tcg_gen_extu_tl_i64(sh, EA);
    tcg_gen_andi_i64(sh, sh, 0xfULL);

    /*
     * Create bytes sh:sh+7 of X (from the description) and place them in
     * the higher doubleword of vD.
     */
    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
    tcg_gen_addi_i64(result, sh, 0x0001020304050607ull);
    set_avr64(VT, result, true);
    /*
     * Create bytes sh+8:sh+15 of X (from the description) and place them
     * in the lower doubleword of vD.
     */
    tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL);
    set_avr64(VT, result, false);

    tcg_temp_free_i64(result);
    tcg_temp_free_i64(sh);
    tcg_temp_free(EA);
}
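/*
 * Worked example for the arithmetic above: for sh = 3,
 * sh * 0x0101010101010101 = 0x0303030303030303, so the high doubleword
 * becomes 0x0303030303030303 + 0x0001020304050607 = 0x030405060708090A and
 * the low one 0x0303030303030303 + 0x08090A0B0C0D0E0F = 0x0B0C0D0E0F101112,
 * i.e. bytes 3:18 of X.
 */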
/*
 * lvsr VRT,RA,RB - Load Vector for Shift Right
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28:31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes (16-sh):(31-sh) of X are placed into vD.
 */
static void trans_lvsr(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    TCGv_i64 result = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv EA = tcg_temp_new();

    /* Get sh (from the description) by ANDing EA with 0xf. */
    gen_addr_reg_index(ctx, EA);
    tcg_gen_extu_tl_i64(sh, EA);
    tcg_gen_andi_i64(sh, sh, 0xfULL);

    /*
     * Create bytes (16-sh):(23-sh) of X (from the description) and place
     * them in the higher doubleword of vD.
     */
    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
    tcg_gen_subfi_i64(result, 0x1011121314151617ULL, sh);
    set_avr64(VT, result, true);
    /*
     * Create bytes (24-sh):(31-sh) of X (from the description) and place
     * them in the lower doubleword of vD.
     */
    tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh);
    set_avr64(VT, result, false);

    tcg_temp_free_i64(result);
    tcg_temp_free_i64(sh);
    tcg_temp_free(EA);
}
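/*
 * Worked example for the subtraction above: for sh = 3, the high doubleword
 * is 0x1011121314151617 - 0x0303030303030303 = 0x0D0E0F1011121314 and the
 * low one 0x18191A1B1C1D1E1F - 0x0303030303030303 = 0x15161718191A1B1C,
 * i.e. bytes 13:28 of X, matching (16-sh):(31-sh).
 */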
/*
 * vsl VRT,VRA,VRB - Vector Shift Left
 *
 * Shift the 128-bit value of vA left by the amount specified in bits
 * 125-127 of vB. The lowest 3 bits in each byte element of register vB
 * must be identical, or the result is undefined.
 */
static void trans_vsl(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save the highest 'sh' bits of the lower doubleword element of vA in
     * variable 'carry' and perform the shift on the lower doubleword.
     */
    get_avr64(avr, VA, false);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shri_i64(carry, avr, 32);
    tcg_gen_shr_i64(carry, carry, tmp);
    tcg_gen_shl_i64(avr, avr, sh);
    set_avr64(VT, avr, false);

    /*
     * Perform the shift on the higher doubleword element of vA and replace
     * the lowest 'sh' bits with 'carry'.
     */
    get_avr64(avr, VA, true);
    tcg_gen_shl_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, true);

    tcg_temp_free_i64(avr);
    tcg_temp_free_i64(sh);
    tcg_temp_free_i64(carry);
    tcg_temp_free_i64(tmp);
}

/*
 * vsr VRT,VRA,VRB - Vector Shift Right
 *
 * Shift the 128-bit value of vA right by the amount specified in bits
 * 125-127 of vB. The lowest 3 bits in each byte element of register vB
 * must be identical, or the result is undefined.
 */
static void trans_vsr(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save the lowest 'sh' bits of the higher doubleword element of vA in
     * variable 'carry' and perform the shift on the higher doubleword.
     */
    get_avr64(avr, VA, true);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shli_i64(carry, avr, 32);
    tcg_gen_shl_i64(carry, carry, tmp);
    tcg_gen_shr_i64(avr, avr, sh);
    set_avr64(VT, avr, true);
    /*
     * Perform the shift on the lower doubleword element of vA and replace
     * the highest 'sh' bits with 'carry'.
     */
    get_avr64(avr, VA, false);
    tcg_gen_shr_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(avr);
    tcg_temp_free_i64(sh);
    tcg_temp_free_i64(carry);
    tcg_temp_free_i64(tmp);
}
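/*
 * Note on the two-step carry extraction in trans_vsl/trans_vsr above:
 * computing e.g. lo >> (64 - sh) directly would require a shift by 64 when
 * sh == 0, which is outside the defined 0..63 range of the 64-bit TCG shift
 * ops.  Splitting it as (lo >> 32) >> (32 - sh) (and the mirrored shl pair
 * in trans_vsr) keeps every shift count in the 0..32 range while still
 * producing a zero carry for sh == 0.
 */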
/*
 * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword
 *
 * All i-th bits (i in range 1 to 8) of each byte of a doubleword element in
 * the source register are concatenated and placed into the i-th byte of the
 * corresponding doubleword element in the destination register.
 *
 * The following solution is applied to both doubleword elements of the
 * source register in parallel, in order to reduce the number of emitted
 * instructions (which is why arrays are used):
 * First, both doubleword elements of source register vB are placed in the
 * corresponding elements of array avr. Bits are gathered in 2x8 iterations
 * (two for loops). In the first iteration, bit 1 of byte 1, bit 2 of byte 2,
 * ... bit 8 of byte 8 are already in their final spots, so avr[i], i={0,1}
 * can be ANDed with tcg_mask. For every following iteration, both avr[i] and
 * tcg_mask have to be shifted right by 7 and 8 places, respectively, to get
 * bit 1 of byte 2, bit 2 of byte 3, ... bit 7 of byte 8 into their final
 * spots, so the shifted avr values (saved in tmp) can be ANDed with the new
 * value of tcg_mask, and so on. After the first 8 iterations (the first
 * loop), all the first bits are in their final places, all second bits
 * except the second bit of the eighth byte are in their places, ... and only
 * one eighth bit, the one from the eighth byte, is in its place. The second
 * loop performs the same operations symmetrically to move the other half of
 * the bits into their final spots. The results for the first and second
 * doubleword elements are saved in result[0] and result[1] respectively,
 * and finally stored into the corresponding doubleword elements of
 * destination register vD.
 */
static void trans_vgbbd(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t mask = 0x8040201008040201ULL;
    int i, j;

    TCGv_i64 result[2];
    result[0] = tcg_temp_new_i64();
    result[1] = tcg_temp_new_i64();
    TCGv_i64 avr[2];
    avr[0] = tcg_temp_new_i64();
    avr[1] = tcg_temp_new_i64();
    TCGv_i64 tcg_mask = tcg_temp_new_i64();

    tcg_gen_movi_i64(tcg_mask, mask);
    for (j = 0; j < 2; j++) {
        get_avr64(avr[j], VB, j);
        tcg_gen_and_i64(result[j], avr[j], tcg_mask);
    }
    for (i = 1; i < 8; i++) {
        tcg_gen_movi_i64(tcg_mask, mask >> (i * 8));
        for (j = 0; j < 2; j++) {
            tcg_gen_shri_i64(tmp, avr[j], i * 7);
            tcg_gen_and_i64(tmp, tmp, tcg_mask);
            tcg_gen_or_i64(result[j], result[j], tmp);
        }
    }
    for (i = 1; i < 8; i++) {
        tcg_gen_movi_i64(tcg_mask, mask << (i * 8));
        for (j = 0; j < 2; j++) {
            tcg_gen_shli_i64(tmp, avr[j], i * 7);
            tcg_gen_and_i64(tmp, tmp, tcg_mask);
            tcg_gen_or_i64(result[j], result[j], tmp);
        }
    }
    for (j = 0; j < 2; j++) {
        set_avr64(VT, result[j], j);
    }

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(tcg_mask);
    tcg_temp_free_i64(result[0]);
    tcg_temp_free_i64(result[1]);
    tcg_temp_free_i64(avr[0]);
    tcg_temp_free_i64(avr[1]);
}
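/*
 * Put differently, vgbbd effectively transposes each doubleword viewed as
 * an 8x8 bit matrix.  The initial mask 0x8040201008040201 selects the
 * diagonal (bit k of byte k, i.e. bit position 9k); a source bit "bit b of
 * byte B" sits 7*(B-b) positions above its destination, so each further
 * iteration shifts the data by a multiple of 7 and the mask by a multiple
 * of 8 to align and extract one more off-diagonal.
 */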
/*
 * vclzw VRT,VRB - Vector Count Leading Zeros Word
 *
 * Count the number of leading zero bits of each word element in the source
 * register and place the result in the corresponding word element of the
 * destination register.
 */
static void trans_vclzw(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i32 tmp = tcg_temp_new_i32();
    int i;

    /* Perform count for every word element using tcg_gen_clzi_i32. */
    for (i = 0; i < 4; i++) {
        tcg_gen_ld_i32(tmp, cpu_env,
            offsetof(CPUPPCState, vsr[32 + VB].u64[0]) + i * 4);
        tcg_gen_clzi_i32(tmp, tmp, 32);
        tcg_gen_st_i32(tmp, cpu_env,
            offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4);
    }

    tcg_temp_free_i32(tmp);
}

/*
 * vclzd VRT,VRB - Vector Count Leading Zeros Doubleword
 *
 * Count the number of leading zero bits of each doubleword element in the
 * source register and place the result in the corresponding doubleword
 * element of the destination register.
 */
static void trans_vclzd(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();

    /* high doubleword */
    get_avr64(avr, VB, true);
    tcg_gen_clzi_i64(avr, avr, 64);
    set_avr64(VT, avr, true);

    /* low doubleword */
    get_avr64(avr, VB, false);
    tcg_gen_clzi_i64(avr, avr, 64);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(avr);
}

GEN_VXFORM(vmuloub, 4, 0);
GEN_VXFORM(vmulouh, 4, 1);
GEN_VXFORM(vmulouw, 4, 2);
GEN_VXFORM_V(vmuluwm, MO_32, tcg_gen_gvec_mul, 4, 2);
GEN_VXFORM_DUAL(vmulouw, PPC_ALTIVEC, PPC_NONE,
                vmuluwm, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM(vmulosb, 4, 4);
GEN_VXFORM(vmulosh, 4, 5);
GEN_VXFORM(vmulosw, 4, 6);
GEN_VXFORM_V(vmulld, MO_64, tcg_gen_gvec_mul, 4, 7);
GEN_VXFORM(vmuleub, 4, 8);
GEN_VXFORM(vmuleuh, 4, 9);
GEN_VXFORM(vmuleuw, 4, 10);
GEN_VXFORM(vmulhuw, 4, 10);
GEN_VXFORM(vmulhud, 4, 11);
GEN_VXFORM_DUAL(vmuleuw, PPC_ALTIVEC, PPC_NONE,
                vmulhuw, PPC_NONE, PPC2_ISA310);
GEN_VXFORM(vmulesb, 4, 12);
GEN_VXFORM(vmulesh, 4, 13);
GEN_VXFORM(vmulesw, 4, 14);
GEN_VXFORM(vmulhsw, 4, 14);
GEN_VXFORM_DUAL(vmulesw, PPC_ALTIVEC, PPC_NONE,
                vmulhsw, PPC_NONE, PPC2_ISA310);
GEN_VXFORM(vmulhsd, 4, 15);
GEN_VXFORM_V(vslb, MO_8, tcg_gen_gvec_shlv, 2, 4);
GEN_VXFORM_V(vslh, MO_16, tcg_gen_gvec_shlv, 2, 5);
GEN_VXFORM_V(vslw, MO_32, tcg_gen_gvec_shlv, 2, 6);
GEN_VXFORM(vrlwnm, 2, 6);
GEN_VXFORM_DUAL(vslw, PPC_ALTIVEC, PPC_NONE, \
                vrlwnm, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_V(vsld, MO_64, tcg_gen_gvec_shlv, 2, 23);
GEN_VXFORM_V(vsrb, MO_8, tcg_gen_gvec_shrv, 2, 8);
GEN_VXFORM_V(vsrh, MO_16, tcg_gen_gvec_shrv, 2, 9);
GEN_VXFORM_V(vsrw, MO_32, tcg_gen_gvec_shrv, 2, 10);
GEN_VXFORM_V(vsrd, MO_64, tcg_gen_gvec_shrv, 2, 27);
GEN_VXFORM_V(vsrab, MO_8, tcg_gen_gvec_sarv, 2, 12);
GEN_VXFORM_V(vsrah, MO_16, tcg_gen_gvec_sarv, 2, 13);
GEN_VXFORM_V(vsraw, MO_32, tcg_gen_gvec_sarv, 2, 14);
GEN_VXFORM_V(vsrad, MO_64, tcg_gen_gvec_sarv, 2, 15);
GEN_VXFORM(vsrv, 2, 28);
GEN_VXFORM(vslv, 2, 29);
GEN_VXFORM(vslo, 6, 16);
GEN_VXFORM(vsro, 6, 17);
GEN_VXFORM(vaddcuw, 0, 6);
GEN_VXFORM(vsubcuw, 0, 22);

#define GEN_VXFORM_SAT(NAME, VECE, NORM, SAT, OPC2, OPC3) \
static void glue(glue(gen_, NAME), _vec)(unsigned vece, TCGv_vec t, \
                                         TCGv_vec sat, TCGv_vec a, \
                                         TCGv_vec b) \
{ \
    TCGv_vec x = tcg_temp_new_vec_matching(t); \
    glue(glue(tcg_gen_, NORM), _vec)(VECE, x, a, b); \
    glue(glue(tcg_gen_, SAT), _vec)(VECE, t, a, b); \
    tcg_gen_cmp_vec(TCG_COND_NE, VECE, x, x, t); \
    tcg_gen_or_vec(VECE, sat, sat, x); \
    tcg_temp_free_vec(x); \
} \
static void glue(gen_, NAME)(DisasContext *ctx) \
{ \
    static const TCGOpcode vecop_list[] = { \
        glue(glue(INDEX_op_, NORM), _vec), \
        glue(glue(INDEX_op_, SAT), _vec), \
        INDEX_op_cmp_vec, 0 \
    }; \
    static const GVecGen4 g = { \
        .fniv = glue(glue(gen_, NAME), _vec), \
        .fno = glue(gen_helper_, NAME), \
        .opt_opc = vecop_list, \
        .write_aofs = true, \
        .vece = VECE, \
    }; \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    tcg_gen_gvec_4(avr_full_offset(rD(ctx->opcode)), \
                   offsetof(CPUPPCState, vscr_sat), \
                   avr_full_offset(rA(ctx->opcode)), \
                   avr_full_offset(rB(ctx->opcode)), \
                   16, 16, &g); \
}
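/*
 * Rationale for the _vec expansion above: the operation is performed twice,
 * once unsaturated (NORM) into x and once saturated (SAT) into the result
 * t.  Any lane where the two differ must have saturated, so the NE compare
 * yields an all-ones lane mask that is ORed into the vscr_sat accumulator,
 * which write_aofs passes to the expander as an output-and-input operand.
 */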
GEN_VXFORM_SAT(vaddubs, MO_8, add, usadd, 0, 8);
GEN_VXFORM_DUAL_EXT(vaddubs, PPC_ALTIVEC, PPC_NONE, 0, \
                    vmul10uq, PPC_NONE, PPC2_ISA300, 0x0000F800)
GEN_VXFORM_SAT(vadduhs, MO_16, add, usadd, 0, 9);
GEN_VXFORM_DUAL(vadduhs, PPC_ALTIVEC, PPC_NONE, \
                vmul10euq, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_SAT(vadduws, MO_32, add, usadd, 0, 10);
GEN_VXFORM_SAT(vaddsbs, MO_8, add, ssadd, 0, 12);
GEN_VXFORM_SAT(vaddshs, MO_16, add, ssadd, 0, 13);
GEN_VXFORM_SAT(vaddsws, MO_32, add, ssadd, 0, 14);
GEN_VXFORM_SAT(vsububs, MO_8, sub, ussub, 0, 24);
GEN_VXFORM_SAT(vsubuhs, MO_16, sub, ussub, 0, 25);
GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26);
GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28);
GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29);
GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30);
GEN_VXFORM(vadduqm, 0, 4);
GEN_VXFORM(vaddcuq, 0, 5);
GEN_VXFORM3(vaddeuqm, 30, 0);
GEN_VXFORM3(vaddecuq, 30, 0);
GEN_VXFORM_DUAL(vaddeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
                vaddecuq, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM(vsubuqm, 0, 20);
GEN_VXFORM(vsubcuq, 0, 21);
GEN_VXFORM3(vsubeuqm, 31, 0);
GEN_VXFORM3(vsubecuq, 31, 0);
GEN_VXFORM_DUAL(vsubeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
                vsubecuq, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_V(vrlb, MO_8, tcg_gen_gvec_rotlv, 2, 0);
GEN_VXFORM_V(vrlh, MO_16, tcg_gen_gvec_rotlv, 2, 1);
GEN_VXFORM_V(vrlw, MO_32, tcg_gen_gvec_rotlv, 2, 2);
GEN_VXFORM(vrlwmi, 2, 2);
GEN_VXFORM_DUAL(vrlw, PPC_ALTIVEC, PPC_NONE, \
                vrlwmi, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_V(vrld, MO_64, tcg_gen_gvec_rotlv, 2, 3);
GEN_VXFORM(vrldmi, 2, 3);
GEN_VXFORM_DUAL(vrld, PPC_NONE, PPC2_ALTIVEC_207, \
                vrldmi, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_TRANS(vsl, 2, 7);
GEN_VXFORM(vrldnm, 2, 7);
GEN_VXFORM_DUAL(vsl, PPC_ALTIVEC, PPC_NONE, \
                vrldnm, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_TRANS(vsr, 2, 11);
GEN_VXFORM_ENV(vpkuhum, 7, 0);
GEN_VXFORM_ENV(vpkuwum, 7, 1);
GEN_VXFORM_ENV(vpkudum, 7, 17);
GEN_VXFORM_ENV(vpkuhus, 7, 2);
GEN_VXFORM_ENV(vpkuwus, 7, 3);
GEN_VXFORM_ENV(vpkudus, 7, 19);
GEN_VXFORM_ENV(vpkshus, 7, 4);
GEN_VXFORM_ENV(vpkswus, 7, 5);
GEN_VXFORM_ENV(vpksdus, 7, 21);
GEN_VXFORM_ENV(vpkshss, 7, 6);
GEN_VXFORM_ENV(vpkswss, 7, 7);
GEN_VXFORM_ENV(vpksdss, 7, 23);
GEN_VXFORM(vpkpx, 7, 12);
GEN_VXFORM_ENV(vsum4ubs, 4, 24);
GEN_VXFORM_ENV(vsum4sbs, 4, 28);
GEN_VXFORM_ENV(vsum4shs, 4, 25);
GEN_VXFORM_ENV(vsum2sws, 4, 26);
GEN_VXFORM_ENV(vsumsws, 4, 30);
GEN_VXFORM_ENV(vaddfp, 5, 0);
GEN_VXFORM_ENV(vsubfp, 5, 1);
GEN_VXFORM_ENV(vmaxfp, 5, 16);
GEN_VXFORM_ENV(vminfp, 5, 17);
GEN_VXFORM_HETRO(vextublx, 6, 24)
GEN_VXFORM_HETRO(vextuhlx, 6, 25)
GEN_VXFORM_HETRO(vextuwlx, 6, 26)
GEN_VXFORM_TRANS_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207,
                      vextuwlx, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_HETRO(vextubrx, 6, 28)
GEN_VXFORM_HETRO(vextuhrx, 6, 29)
GEN_VXFORM_HETRO(vextuwrx, 6, 30)
GEN_VXFORM_TRANS(lvsl, 6, 31)
GEN_VXFORM_TRANS(lvsr, 6, 32)
GEN_VXFORM_TRANS_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207,
                      vextuwrx, PPC_NONE, PPC2_ISA300)

#define GEN_VXRFORM1(opname, name, str, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
    { \
        TCGv_ptr ra, rb, rd; \
        if (unlikely(!ctx->altivec_enabled)) { \
            gen_exception(ctx, POWERPC_EXCP_VPU); \
            return; \
        } \
        ra = gen_avr_ptr(rA(ctx->opcode)); \
        rb = gen_avr_ptr(rB(ctx->opcode)); \
        rd = gen_avr_ptr(rD(ctx->opcode)); \
        gen_helper_##opname(cpu_env, rd, ra, rb); \
        tcg_temp_free_ptr(ra); \
        tcg_temp_free_ptr(rb); \
        tcg_temp_free_ptr(rd); \
    }

#define GEN_VXRFORM(name, opc2, opc3) \
    GEN_VXRFORM1(name, name, #name, opc2, opc3) \
    GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))

/*
 * Support for Altivec instructions that use bit 31 (Rc) as an opcode
 * bit but also use bit 21 as an actual Rc bit. In general, these pairs
 * come from different versions of the ISA, so we must also support a
 * pair of flags for each instruction.
 */
#define GEN_VXRFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \
static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
{ \
    if ((Rc(ctx->opcode) == 0) && \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
        if (Rc21(ctx->opcode) == 0) { \
            gen_##name0(ctx); \
        } else { \
            gen_##name0##_(ctx); \
        } \
    } else if ((Rc(ctx->opcode) == 1) && \
               ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
        if (Rc21(ctx->opcode) == 0) { \
            gen_##name1(ctx); \
        } else { \
            gen_##name1##_(ctx); \
        } \
    } else { \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
    } \
}
GEN_VXRFORM(vcmpequb, 3, 0)
GEN_VXRFORM(vcmpequh, 3, 1)
GEN_VXRFORM(vcmpequw, 3, 2)
GEN_VXRFORM(vcmpequd, 3, 3)
GEN_VXRFORM(vcmpnezb, 3, 4)
GEN_VXRFORM(vcmpnezh, 3, 5)
GEN_VXRFORM(vcmpnezw, 3, 6)
GEN_VXRFORM(vcmpgtsb, 3, 12)
GEN_VXRFORM(vcmpgtsh, 3, 13)
GEN_VXRFORM(vcmpgtsw, 3, 14)
GEN_VXRFORM(vcmpgtsd, 3, 15)
GEN_VXRFORM(vcmpgtub, 3, 8)
GEN_VXRFORM(vcmpgtuh, 3, 9)
GEN_VXRFORM(vcmpgtuw, 3, 10)
GEN_VXRFORM(vcmpgtud, 3, 11)
GEN_VXRFORM(vcmpeqfp, 3, 3)
GEN_VXRFORM(vcmpgefp, 3, 7)
GEN_VXRFORM(vcmpgtfp, 3, 11)
GEN_VXRFORM(vcmpbfp, 3, 15)
GEN_VXRFORM(vcmpneb, 3, 0)
GEN_VXRFORM(vcmpneh, 3, 1)
GEN_VXRFORM(vcmpnew, 3, 2)

GEN_VXRFORM_DUAL(vcmpequb, PPC_ALTIVEC, PPC_NONE, \
                 vcmpneb, PPC_NONE, PPC2_ISA300)
GEN_VXRFORM_DUAL(vcmpequh, PPC_ALTIVEC, PPC_NONE, \
                 vcmpneh, PPC_NONE, PPC2_ISA300)
GEN_VXRFORM_DUAL(vcmpequw, PPC_ALTIVEC, PPC_NONE, \
                 vcmpnew, PPC_NONE, PPC2_ISA300)
GEN_VXRFORM_DUAL(vcmpeqfp, PPC_ALTIVEC, PPC_NONE, \
                 vcmpequd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXRFORM_DUAL(vcmpbfp, PPC_ALTIVEC, PPC_NONE, \
                 vcmpgtsd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXRFORM_DUAL(vcmpgtfp, PPC_ALTIVEC, PPC_NONE, \
                 vcmpgtud, PPC_NONE, PPC2_ALTIVEC_207)

static void gen_vsplti(DisasContext *ctx, int vece)
{
    int simm;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    simm = SIMM5(ctx->opcode);
    tcg_gen_gvec_dup_imm(vece, avr_full_offset(rD(ctx->opcode)), 16, 16, simm);
}

#define GEN_VXFORM_VSPLTI(name, vece, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) { gen_vsplti(ctx, vece); }

GEN_VXFORM_VSPLTI(vspltisb, MO_8, 6, 12);
GEN_VXFORM_VSPLTI(vspltish, MO_16, 6, 13);
GEN_VXFORM_VSPLTI(vspltisw, MO_32, 6, 14);

#define GEN_VXFORM_NOA(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
    { \
        TCGv_ptr rb, rd; \
        if (unlikely(!ctx->altivec_enabled)) { \
            gen_exception(ctx, POWERPC_EXCP_VPU); \
            return; \
        } \
        rb = gen_avr_ptr(rB(ctx->opcode)); \
        rd = gen_avr_ptr(rD(ctx->opcode)); \
        gen_helper_##name(rd, rb); \
        tcg_temp_free_ptr(rb); \
        tcg_temp_free_ptr(rd); \
    }

#define GEN_VXFORM_NOA_ENV(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
    { \
        TCGv_ptr rb, rd; \
\
        if (unlikely(!ctx->altivec_enabled)) { \
            gen_exception(ctx, POWERPC_EXCP_VPU); \
            return; \
        } \
        rb = gen_avr_ptr(rB(ctx->opcode)); \
        rd = gen_avr_ptr(rD(ctx->opcode)); \
        gen_helper_##name(cpu_env, rd, rb); \
        tcg_temp_free_ptr(rb); \
        tcg_temp_free_ptr(rd); \
    }

#define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4) \
static void glue(gen_, name)(DisasContext *ctx) \
    { \
        TCGv_ptr rb, rd; \
        if (unlikely(!ctx->altivec_enabled)) { \
            gen_exception(ctx, POWERPC_EXCP_VPU); \
            return; \
        } \
        rb = gen_avr_ptr(rB(ctx->opcode)); \
        rd = gen_avr_ptr(rD(ctx->opcode)); \
        gen_helper_##name(rd, rb); \
        tcg_temp_free_ptr(rb); \
        tcg_temp_free_ptr(rd); \
    }

#define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4) \
static void glue(gen_, name)(DisasContext *ctx) \
    { \
        TCGv_ptr rb; \
        if (unlikely(!ctx->altivec_enabled)) { \
            gen_exception(ctx, POWERPC_EXCP_VPU); \
            return; \
        } \
        rb = gen_avr_ptr(rB(ctx->opcode)); \
        gen_helper_##name(cpu_gpr[rD(ctx->opcode)], rb); \
        tcg_temp_free_ptr(rb); \
    }
GEN_VXFORM_NOA(vupkhsb, 7, 8);
GEN_VXFORM_NOA(vupkhsh, 7, 9);
GEN_VXFORM_NOA(vupkhsw, 7, 25);
GEN_VXFORM_NOA(vupklsb, 7, 10);
GEN_VXFORM_NOA(vupklsh, 7, 11);
GEN_VXFORM_NOA(vupklsw, 7, 27);
GEN_VXFORM_NOA(vupkhpx, 7, 13);
GEN_VXFORM_NOA(vupklpx, 7, 15);
GEN_VXFORM_NOA_ENV(vrefp, 5, 4);
GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5);
GEN_VXFORM_NOA_ENV(vexptefp, 5, 6);
GEN_VXFORM_NOA_ENV(vlogefp, 5, 7);
GEN_VXFORM_NOA_ENV(vrfim, 5, 11);
GEN_VXFORM_NOA_ENV(vrfin, 5, 8);
GEN_VXFORM_NOA_ENV(vrfip, 5, 10);
GEN_VXFORM_NOA_ENV(vrfiz, 5, 9);
GEN_VXFORM_NOA(vprtybw, 1, 24);
GEN_VXFORM_NOA(vprtybd, 1, 24);
GEN_VXFORM_NOA(vprtybq, 1, 24);

static void gen_vsplt(DisasContext *ctx, int vece)
{
    int uimm, dofs, bofs;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    uimm = UIMM5(ctx->opcode);
    bofs = avr_full_offset(rB(ctx->opcode));
    dofs = avr_full_offset(rD(ctx->opcode));

    /* Experimental testing shows that hardware masks the immediate. */
    bofs += (uimm << vece) & 15;
#ifndef HOST_WORDS_BIGENDIAN
    bofs ^= 15;
    bofs &= ~((1 << vece) - 1);
#endif

    tcg_gen_gvec_dup_mem(vece, dofs, bofs, 16, 16);
}
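/*
 * A note on the #ifndef above: avr_full_offset() yields byte offsets in the
 * host's memory layout, while the uimm element index counts from the left
 * of the register as the ISA describes it.  On little-endian hosts the byte
 * index is therefore mirrored (bofs ^= 15) and then re-aligned to the
 * element size before the splat.
 */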
#define GEN_VXFORM_VSPLT(name, vece, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) { gen_vsplt(ctx, vece); }

#define GEN_VXFORM_UIMM_ENV(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
    { \
        TCGv_ptr rb, rd; \
        TCGv_i32 uimm; \
\
        if (unlikely(!ctx->altivec_enabled)) { \
            gen_exception(ctx, POWERPC_EXCP_VPU); \
            return; \
        } \
        uimm = tcg_const_i32(UIMM5(ctx->opcode)); \
        rb = gen_avr_ptr(rB(ctx->opcode)); \
        rd = gen_avr_ptr(rD(ctx->opcode)); \
        gen_helper_##name(cpu_env, rd, rb, uimm); \
        tcg_temp_free_i32(uimm); \
        tcg_temp_free_ptr(rb); \
        tcg_temp_free_ptr(rd); \
    }

#define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max) \
static void glue(gen_, name)(DisasContext *ctx) \
    { \
        TCGv_ptr rb, rd; \
        uint8_t uimm = UIMM4(ctx->opcode); \
        TCGv_i32 t0; \
        if (unlikely(!ctx->altivec_enabled)) { \
            gen_exception(ctx, POWERPC_EXCP_VPU); \
            return; \
        } \
        if (uimm > splat_max) { \
            uimm = 0; \
        } \
        t0 = tcg_temp_new_i32(); \
        tcg_gen_movi_i32(t0, uimm); \
        rb = gen_avr_ptr(rB(ctx->opcode)); \
        rd = gen_avr_ptr(rD(ctx->opcode)); \
        gen_helper_##name(rd, rb, t0); \
        tcg_temp_free_i32(t0); \
        tcg_temp_free_ptr(rb); \
        tcg_temp_free_ptr(rd); \
    }

GEN_VXFORM_VSPLT(vspltb, MO_8, 6, 8);
GEN_VXFORM_VSPLT(vsplth, MO_16, 6, 9);
GEN_VXFORM_VSPLT(vspltw, MO_32, 6, 10);
GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15);
GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14);
GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12);
GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8);
GEN_VXFORM_UIMM_SPLAT(vinsertb, 6, 12, 15);
GEN_VXFORM_UIMM_SPLAT(vinserth, 6, 13, 14);
GEN_VXFORM_UIMM_SPLAT(vinsertw, 6, 14, 12);
GEN_VXFORM_UIMM_SPLAT(vinsertd, 6, 15, 8);
GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15);
GEN_VXFORM_DUAL(vspltb, PPC_ALTIVEC, PPC_NONE,
                vextractub, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE,
                vextractuh, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE,
                vextractuw, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltisb, PPC_ALTIVEC, PPC_NONE,
                vinsertb, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltish, PPC_ALTIVEC, PPC_NONE,
                vinserth, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltisw, PPC_ALTIVEC, PPC_NONE,
                vinsertw, PPC_NONE, PPC2_ISA300);

static void gen_vsldoi(DisasContext *ctx)
{
    TCGv_ptr ra, rb, rd;
    TCGv_i32 sh;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    ra = gen_avr_ptr(rA(ctx->opcode));
    rb = gen_avr_ptr(rB(ctx->opcode));
    rd = gen_avr_ptr(rD(ctx->opcode));
    sh = tcg_const_i32(VSH(ctx->opcode));
    gen_helper_vsldoi(rd, ra, rb, sh);
    tcg_temp_free_ptr(ra);
    tcg_temp_free_ptr(rb);
    tcg_temp_free_ptr(rd);
    tcg_temp_free_i32(sh);
}

#define GEN_VAFORM_PAIRED(name0, name1, opc2) \
static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
    { \
        TCGv_ptr ra, rb, rc, rd; \
        if (unlikely(!ctx->altivec_enabled)) { \
            gen_exception(ctx, POWERPC_EXCP_VPU); \
            return; \
        } \
        ra = gen_avr_ptr(rA(ctx->opcode)); \
        rb = gen_avr_ptr(rB(ctx->opcode)); \
        rc = gen_avr_ptr(rC(ctx->opcode)); \
        rd = gen_avr_ptr(rD(ctx->opcode)); \
        if (Rc(ctx->opcode)) { \
            gen_helper_##name1(cpu_env, rd, ra, rb, rc); \
        } else { \
            gen_helper_##name0(cpu_env, rd, ra, rb, rc); \
        } \
        tcg_temp_free_ptr(ra); \
        tcg_temp_free_ptr(rb); \
        tcg_temp_free_ptr(rc); \
        tcg_temp_free_ptr(rd); \
    }

GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16)
static void gen_vmladduhm(DisasContext *ctx)
{
    TCGv_ptr ra, rb, rc, rd;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    ra = gen_avr_ptr(rA(ctx->opcode));
    rb = gen_avr_ptr(rB(ctx->opcode));
    rc = gen_avr_ptr(rC(ctx->opcode));
    rd = gen_avr_ptr(rD(ctx->opcode));
    gen_helper_vmladduhm(rd, ra, rb, rc);
    tcg_temp_free_ptr(ra);
    tcg_temp_free_ptr(rb);
    tcg_temp_free_ptr(rc);
    tcg_temp_free_ptr(rd);
}

static void gen_vpermr(DisasContext *ctx)
{
    TCGv_ptr ra, rb, rc, rd;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    ra = gen_avr_ptr(rA(ctx->opcode));
    rb = gen_avr_ptr(rB(ctx->opcode));
    rc = gen_avr_ptr(rC(ctx->opcode));
    rd = gen_avr_ptr(rD(ctx->opcode));
    gen_helper_vpermr(cpu_env, rd, ra, rb, rc);
    tcg_temp_free_ptr(ra);
    tcg_temp_free_ptr(rb);
    tcg_temp_free_ptr(rc);
    tcg_temp_free_ptr(rd);
}

GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18)
GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19)
GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20)
GEN_VAFORM_PAIRED(vsel, vperm, 21)
GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)

GEN_VXFORM_NOA(vclzb, 1, 28)
GEN_VXFORM_NOA(vclzh, 1, 29)
GEN_VXFORM_TRANS(vclzw, 1, 30)
GEN_VXFORM_TRANS(vclzd, 1, 31)
GEN_VXFORM_NOA_2(vnegw, 1, 24, 6)
GEN_VXFORM_NOA_2(vnegd, 1, 24, 7)
GEN_VXFORM_NOA_2(vextsb2w, 1, 24, 16)
GEN_VXFORM_NOA_2(vextsh2w, 1, 24, 17)
GEN_VXFORM_NOA_2(vextsb2d, 1, 24, 24)
GEN_VXFORM_NOA_2(vextsh2d, 1, 24, 25)
GEN_VXFORM_NOA_2(vextsw2d, 1, 24, 26)
GEN_VXFORM_NOA_2(vctzb, 1, 24, 28)
GEN_VXFORM_NOA_2(vctzh, 1, 24, 29)
GEN_VXFORM_NOA_2(vctzw, 1, 24, 30)
GEN_VXFORM_NOA_2(vctzd, 1, 24, 31)
GEN_VXFORM_NOA_3(vclzlsbb, 1, 24, 0)
GEN_VXFORM_NOA_3(vctzlsbb, 1, 24, 1)
GEN_VXFORM_NOA(vpopcntb, 1, 28)
GEN_VXFORM_NOA(vpopcnth, 1, 29)
GEN_VXFORM_NOA(vpopcntw, 1, 30)
GEN_VXFORM_NOA(vpopcntd, 1, 31)
GEN_VXFORM_DUAL(vclzb, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcntb, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzh, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcnth, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcntw, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM(vbpermd, 6, 23);
GEN_VXFORM(vbpermq, 6, 21);
GEN_VXFORM_TRANS(vgbbd, 6, 20);
GEN_VXFORM(vpmsumb, 4, 16)
GEN_VXFORM(vpmsumh, 4, 17)
GEN_VXFORM(vpmsumw, 4, 18)
GEN_VXFORM(vpmsumd, 4, 19)

#define GEN_BCD(op) \
static void gen_##op(DisasContext *ctx) \
{ \
    TCGv_ptr ra, rb, rd; \
    TCGv_i32 ps; \
\
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
\
    ra = gen_avr_ptr(rA(ctx->opcode)); \
    rb = gen_avr_ptr(rB(ctx->opcode)); \
    rd = gen_avr_ptr(rD(ctx->opcode)); \
\
    ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
\
    gen_helper_##op(cpu_crf[6], rd, ra, rb, ps); \
\
    tcg_temp_free_ptr(ra); \
    tcg_temp_free_ptr(rb); \
    tcg_temp_free_ptr(rd); \
    tcg_temp_free_i32(ps); \
}

#define GEN_BCD2(op) \
static void gen_##op(DisasContext *ctx) \
{ \
    TCGv_ptr rd, rb; \
    TCGv_i32 ps; \
\
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
\
    rb = gen_avr_ptr(rB(ctx->opcode)); \
    rd = gen_avr_ptr(rD(ctx->opcode)); \
\
    ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
\
    gen_helper_##op(cpu_crf[6], rd, rb, ps); \
\
    tcg_temp_free_ptr(rb); \
    tcg_temp_free_ptr(rd); \
    tcg_temp_free_i32(ps); \
}
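/*
 * For the BCD macros above, (ctx->opcode & 0x200) extracts the PS
 * ("preferred sign") bit of the instruction word, which selects the sign
 * code used for positive results; the helpers return their status through
 * CR field 6 (cpu_crf[6]).
 */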
GEN_BCD(bcdadd)
GEN_BCD(bcdsub)
GEN_BCD2(bcdcfn)
GEN_BCD2(bcdctn)
GEN_BCD2(bcdcfz)
GEN_BCD2(bcdctz)
GEN_BCD2(bcdcfsq)
GEN_BCD2(bcdctsq)
GEN_BCD2(bcdsetsgn)
GEN_BCD(bcdcpsgn);
GEN_BCD(bcds);
GEN_BCD(bcdus);
GEN_BCD(bcdsr);
GEN_BCD(bcdtrunc);
GEN_BCD(bcdutrunc);

static void gen_xpnd04_1(DisasContext *ctx)
{
    switch (opc4(ctx->opcode)) {
    case 0:
        gen_bcdctsq(ctx);
        break;
    case 2:
        gen_bcdcfsq(ctx);
        break;
    case 4:
        gen_bcdctz(ctx);
        break;
    case 5:
        gen_bcdctn(ctx);
        break;
    case 6:
        gen_bcdcfz(ctx);
        break;
    case 7:
        gen_bcdcfn(ctx);
        break;
    case 31:
        gen_bcdsetsgn(ctx);
        break;
    default:
        gen_invalid(ctx);
        break;
    }
}

static void gen_xpnd04_2(DisasContext *ctx)
{
    switch (opc4(ctx->opcode)) {
    case 0:
        gen_bcdctsq(ctx);
        break;
    case 2:
        gen_bcdcfsq(ctx);
        break;
    case 4:
        gen_bcdctz(ctx);
        break;
    case 6:
        gen_bcdcfz(ctx);
        break;
    case 7:
        gen_bcdcfn(ctx);
        break;
    case 31:
        gen_bcdsetsgn(ctx);
        break;
    default:
        gen_invalid(ctx);
        break;
    }
}

GEN_VXFORM_DUAL(vsubcuw, PPC_ALTIVEC, PPC_NONE, \
                xpnd04_1, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE, \
                xpnd04_2, PPC_NONE, PPC2_ISA300)

GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE, \
                bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsububs, PPC_ALTIVEC, PPC_NONE, \
                bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE, \
                bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsubuhs, PPC_ALTIVEC, PPC_NONE, \
                bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vaddshs, PPC_ALTIVEC, PPC_NONE, \
                bcdcpsgn, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubudm, PPC2_ALTIVEC_207, PPC_NONE, \
                bcds, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \
                bcdus, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE, \
                bcdtrunc, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubuqm, PPC2_ALTIVEC_207, PPC_NONE, \
                bcdtrunc, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubcuq, PPC2_ALTIVEC_207, PPC_NONE, \
                bcdutrunc, PPC_NONE, PPC2_ISA300)

static void gen_vsbox(DisasContext *ctx)
{
    TCGv_ptr ra, rd;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    ra = gen_avr_ptr(rA(ctx->opcode));
    rd = gen_avr_ptr(rD(ctx->opcode));
    gen_helper_vsbox(rd, ra);
    tcg_temp_free_ptr(ra);
    tcg_temp_free_ptr(rd);
}
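/*
 * vsbox above and the v*cipher* patterns below are the ISA 2.07 AES
 * acceleration instructions: vcipher/vcipherlast run one (final) AES
 * encryption round, vncipher/vncipherlast one decryption round, and vsbox
 * applies the AES SubBytes step; the actual work is done in out-of-line
 * helpers.
 */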
GEN_VXFORM(vcipher, 4, 20)
GEN_VXFORM(vcipherlast, 4, 20)
GEN_VXFORM(vncipher, 4, 21)
GEN_VXFORM(vncipherlast, 4, 21)

GEN_VXFORM_DUAL(vcipher, PPC_NONE, PPC2_ALTIVEC_207,
                vcipherlast, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vncipher, PPC_NONE, PPC2_ALTIVEC_207,
                vncipherlast, PPC_NONE, PPC2_ALTIVEC_207)

#define VSHASIGMA(op) \
static void gen_##op(DisasContext *ctx) \
{ \
    TCGv_ptr ra, rd; \
    TCGv_i32 st_six; \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    ra = gen_avr_ptr(rA(ctx->opcode)); \
    rd = gen_avr_ptr(rD(ctx->opcode)); \
    st_six = tcg_const_i32(rB(ctx->opcode)); \
    gen_helper_##op(rd, ra, st_six); \
    tcg_temp_free_ptr(ra); \
    tcg_temp_free_ptr(rd); \
    tcg_temp_free_i32(st_six); \
}

VSHASIGMA(vshasigmaw)
VSHASIGMA(vshasigmad)

GEN_VXFORM3(vpermxor, 22, 0xFF)
GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
                vpermxor, PPC_NONE, PPC2_ALTIVEC_207)

#undef GEN_VR_LDX
#undef GEN_VR_STX
#undef GEN_VR_LVE
#undef GEN_VR_STVE

#undef GEN_VX_LOGICAL
#undef GEN_VX_LOGICAL_207
#undef GEN_VXFORM
#undef GEN_VXFORM_207
#undef GEN_VXFORM_DUAL
#undef GEN_VXRFORM_DUAL
#undef GEN_VXRFORM1
#undef GEN_VXRFORM
#undef GEN_VXFORM_VSPLTI
#undef GEN_VXFORM_NOA
#undef GEN_VXFORM_UIMM
#undef GEN_VAFORM_PAIRED

#undef GEN_BCD2